diff --git "a/4987.jsonl" "b/4987.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4987.jsonl"
@@ -0,0 +1,1330 @@
+{"seq_id":"45127193798","text":"import os\nimport sys\n\nimport unittest\nfrom lxml import etree as ET\n\n# allows us to run this from the project root\nsys.path.append(os.path.realpath('.'))\n\nfrom lvfs.util import _markdown_from_root, _xml_from_markdown, _get_update_description_problems\n\nclass MarkdownTest(unittest.TestCase):\n\n def test_appstream_convert(self):\n\n markup = \"\"\"\n
CHANGES IN THIS RELEASE
\nVersion 11.8.55.3510
\n[Important updates]
\n\n\n[New functions & enhancements]
\n\n- Nothing.
\n- Nothing more.
\n
\n[Problem fixes]
\n\n\"\"\"\n markdown = _markdown_from_root(ET.fromstring('' + markup + ''))\n print('`'+markdown+'`')\n\n # convert from markdown back to XML\n root = _xml_from_markdown(markdown)\n xml = ET.tostring(root, pretty_print=True)\n print('`'+xml.decode()+'`')\n\n # show problems\n for problem in _get_update_description_problems(root):\n print(' * %s' % problem.description)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"globotree/lvfs-website","sub_path":"lvfs/main/markdown_test.py","file_name":"markdown_test.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"21400815491","text":"pessoas = list()\ndados = list()\nwhile True:\n dados.append(str(input(\"Informe seu nome: \")))\n dados.append(float(input(\"Informe seu peso: \")))\n escolha = str(input(\"Deseja continuar?[S/N]\"))\n\n if len(pessoas) == 0:\n maior = menor = dados[1]\n else:\n if dados[1] > maior:\n maior = dados[1]\n elif dados[1] < menor:\n menor = dados[1]\n pessoas.append(dados[:])\n dados.clear()\n\n if escolha in \"Nn\":\n break\nprint(20*'=-')\n\nprint(f'Numero de pessoas que foram cadastradas {len(pessoas)}')\n\nprint(f' maior peso: ', end=\"\")\nfor p in pessoas:\n if p[1] == maior:\n print(f'{p[0]}', end=\" \")\n\nprint()\n\nprint(f\" menor peso: \", end=\" \")\nfor p in pessoas:\n if p[1] == menor:\n print(f\"{p[0]}\", end=' ')\n\n\n\n\n\n","repo_name":"GGMmattos/Py","sub_path":"Desafios em Python(curso em video)/Mundo 3/d084.py","file_name":"d084.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"27623506365","text":"#! /usr/bin/python\n# -*- encoding: utf-8 -*-\n\nfrom random import randint, uniform\nfrom collections import deque\nfrom math import log, sqrt, fabs\n\ndef generarTiempoReparacion():\n x = randint(1,100)*1.0/100.0\n if x == 1.0:\n x = 0.99\n tiempo = - (log(1-x))/2\n return round(tiempo)\n \ndef generarTiempoFallo():\n x = randint(1,100)*1.0/100.0\n if x == 1.0:\n x = 0.99\n falla = - log(1-x)\n return round(falla)\n\ndef simulacion(n,s):\n\n\tcolaTrabajo = []\n\tcolaRepuesto = []\n\tcolaReparacion = []\n\n\tfalloSistema = False\n\ttiempo = 0\n\n\tfor i in range(0,n):\n\t\tcolaTrabajo.append(generarTiempoFallo())\n\tfor j in range(0,s):\n\t\tcolaRepuesto.append(generarTiempoFallo())\n\n\testa_reparando = False\n\treparacion = 0\n\t\n\n\twhile not falloSistema:\n\n\t\tfor i in colaTrabajo:\n\t\t\tif i <= tiempo:\n\t\t\t\tif len(colaRepuesto) > 0:\n\t\t\t\t\tcolaTrabajo.remove(i)\n\t\t\t\t\tcolaReparacion.append(generarTiempoReparacion())\n\t\t\t\t\tcolaTrabajo.append(tiempo + colaRepuesto.pop(0))\n\t\t\t\telif len(colaRepuesto) <= 0:\n\t\t\t\t\tfalloSistema = True\n\t\t\t\t\treturn tiempo\n\n\t\n\t\tif (reparacion <= 0 and not esta_reparando):\n\t\t\tif (len(colaReparacion) > 0):\n\t\t\t\treparacion = colaReparacion.pop(0)\n\t\t\t\testa_reparando = True\n\t\telif (reparacion <= 0 and esta_reparando):\n\t\t\tcolaRepuesto.append(generarTiempoFallo())\n\t\t\testa_reparando = False\n\t\t\tif (len(colaReparacion) > 0):\n\t\t\t\treparacion = colaReparacion.pop(0)\n\t\t\t\testa_reparando = True\n\n\t\treparacion -=1\n\t\ttiempo +=1\n\nn = int(input('introduzca numero de maquinas: '))\ns = int(input('introduzca numero de maquinas de repuesto: '))\nr = int(input('introduzca numero de repeticiones: '))\n\nt = 0\ni = 0\nwhile i < r:\n\tt += simulacion(n,s)\n\ti+=1\n\nprint('el tiempo esperado es: ', t/r)\n","repo_name":"J0hnG4lt/modelos_de_simulacion","sub_path":"sahid/problema_iv.py","file_name":"problema_iv.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1964403373","text":"import unittest\n# from unittest import mock\n\nfrom modi_plus.task.connection_task import ConnectionTask\n\n\nclass TestConnTask(unittest.TestCase):\n \"\"\"Tests for 'ConnTask' class\"\"\"\n\n def setUp(self):\n \"\"\"Set up test fixtures, if any.\"\"\"\n self.mock_kwargs = {\"serialport_recv_q\": None, \"serialport_send_q\": None}\n self.connection_task = ConnectionTask(**self.mock_kwargs)\n\n def tearDown(self):\n \"\"\"Tear down test fixtures, if any.\"\"\"\n del self.connection_task\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"LUXROBO/pymodi-plus","sub_path":"tests/task/test_conn_task.py","file_name":"test_conn_task.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36162187802","text":"from matplotlib.pyplot import show, hist\r\nimport random\r\n\r\n\r\n#Change in balance function in case the rate fluctuates\r\ndef changeInBalance(account_balance):\r\n #random.gauss creates a gaussean bell curve distribution with average=3% and standard deviation=2%\r\n return account_balance * random.gauss(0.03, 0.02)\r\n\r\n\r\n#Initial Conditions\r\nnum_of_years = 10\r\nsimulationCount = 1000\r\n#Set list to store data of final balances after the time period\r\nfinalBalances = []\r\n\r\nfor i in range(simulationCount):\r\n #set initial conditions for time and balance\r\n time = 0\r\n balance = 10000\r\n\r\n while time < num_of_years:\r\n # increase balance and time\r\n balance += changeInBalance(balance)\r\n time += 1\r\n\r\n finalBalances.append(balance)\r\n\r\n\r\n#plot the data\r\nhist(finalBalances, bins=20)\r\nshow()","repo_name":"JBrownCS/Account_Simulations","sub_path":"AccountSimulationBellCurve.py","file_name":"AccountSimulationBellCurve.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25830960417","text":"from datetime import datetime\nfrom numpy import array\nfrom os import system as do\nimport numpy as np\nfrom math import floor\n\nlogfile = \"log.txt\"\n\nsizes = [\"720p\", \"1080p\", \"4k\"]\nblocks = [1, 2, 5, 10, 20, 30]\nthreads_x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]\nthreads_y = []\n\ndo(\"mkdir in 2>/dev/null\")\n# for t_num in blocks:\n# do(f\"mkdir out_{t_num} 2>/dev/null\")\n\ndo(\"make\")\n\ndate = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\ndo(f\"echo 'run at {date}' >> {logfile}\")\n\nprint()\nfor size in sizes:\n\n print(f\"\\nsmoothing image of size {size}\")\n\n log_fname = f\"result_{size}.txt\"\n in_fname = f\"in/sana_noise{size}.jpg\"\n\n do(f\"echo __________ >> {logfile}\")\n do(f\"echo img size: {size} >> {logfile}\")\n for t_num in blocks:\n\n print(f\" with block= {t_num}\")\n\n # if size == \"720p\":\n # # threads_x = np.arange(10, 1280 / t_num,\n # # floor(0.1 * (1280 / t_num)))\n # # threads_y = np.arange(10, 720 / t_num, floor(0.1 * (720 / t_num)))\n # threads_x = []\n #\n # if size == \"1080p\":\n # threads_x = np.arange(10, 1920 / t_num,\n # floor(0.1 * (1920 / t_num)))\n # threads_y = np.arange(10, 1080 / t_num,\n # floor(0.1 * (1080 / t_num)))\n # if size == \"4k\":\n # threads_x = np.arange(10, 3840 / t_num,\n # floor(0.1 * (3840 / t_num)))\n # threads_y = np.arange(10, 2160 / t_num,\n # floor(0.1 * (2160 / t_num)))\n\n do(f\"echo .......... >> {logfile}\")\n do(f\"echo 'block = {t_num}' >> {logfile}\")\n out_fname = f\"out_{t_num}/sana_smoothed{size}.jpg\"\n for i in range(len(threads_x)):\n do(f\"echo 'thread x = {threads_x[i]}' >> {logfile}\")\n # do(f\"echo 'thread y = {threads_y[i]}' >> {logfile}\")\n do(f\"./cuda {t_num} {in_fname} {out_fname} {threads_x[i]} {threads_x[i]}>> {logfile}\"\n )\n # do(f\"echo './cuda {t_num} {in_fname} {out_fname} {threads_x[i]} {threads_x[i]}'>> {logfile}\"\n # )\n #Promedio\n # nums = 10\n # while nums > 0:\n # do(f\"./cuda {t_num} {in_fname} {out_fname} 0 0 >> logtpm.txt\")\n # nums = nums - 1\n # file = open(\"logtpm.txt\", \"r+\")\n # l = list(map(float, file.read().splitlines()))\n # file.seek(0)\n # file.truncate()\n # avg = sum(l) / len(l)\n # do(f\"echo 'time = {avg}' >> {logfile}\")\n\ndo(f\"echo ========== >> {logfile}\")\n","repo_name":"jjpulidos/Parallel-Computing-2021-1","sub_path":"Cuda/script_ejecutar_todo.py","file_name":"script_ejecutar_todo.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38004286312","text":"import numpy as np\n\ndef PCA(df, k):\n x = np.array(df)\n\n # step 1: calculate the Mean normalization of x\n normalized_x = x - np.mean(x,axis=0)\n # step 2: calculate covariance matrix from the normalized_x\n cov = np.cov(normalized_x, rowvar=False)\n\n # make the parameter rowvar = False; check the documentation of np.cov to see why..\n\n # step 3: compute the eigen values and eigen vectors\n eig_val, eig_vec = np.linalg.eig(cov)\n\n # step 4: sort the eigen values in \"descending\" order, then use this sorted indicies to sort the eigen vectors.\n ind_sort = np.argsort(eig_val)[::-1]\n eig_val_sort = eig_val[ind_sort]\n eig_vect_sort = eig_vec[ind_sort]\n # print(eig_vect_sort)\n\n # step 5: select k eigen vectors\n reduced_eigen_vec = eig_vect_sort[:,:k]\n \n # step 6: transform the data\n z = normalized_x @ reduced_eigen_vec\n\n return z","repo_name":"matzolla/Project_GDA","sub_path":"modules/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"5296702778","text":"###############################################################################\n### Functions creating property-property plots\n###############################################################################\n\n### Description:\n# ...\n\n\n#------------------------------------------------------------------------------\n### Import packages\nimport matplotlib.pyplot as plt\nimport os\n\n\n#------------------------------------------------------------------------------\n### Set variables\n\n# Set figure size\nFIG_SIZE = 4\n\n# Set plot marker color\nMARKER_COL = 'black'\n\n\n#------------------------------------------------------------------------------\n### Functions\n\n# Create the delta T prop prop plot\ndef deltaT(vocab, prop_config, df, output_dir):\n\n\t# Set up the figure\n\tfig, ax = plt.subplots(figsize=(FIG_SIZE,FIG_SIZE))\n\n\t# Extract column header names from function input configs and plot\n\tx_header = vocab[prop_config['x-axis']]['col_header_name']\n\ty_header = vocab[prop_config['y-axis']]['col_header_name']\n\tax.scatter(df[x_header],df[y_header], marker='.', c=MARKER_COL)\n\n\t# Add grid and axis labels\n\tax.grid(True)\n\tplt.xlabel(vocab[prop_config['x-axis']]['fig_label_name_python'])\n\tplt.ylabel(vocab[prop_config['y-axis']]['fig_label_name_python'])\n\n\t# Save the plot to file and close figure\n\tfilename = 'deltaT.png'\n\tfilepath = os.path.join(output_dir, filename)\n\tplt.savefig(filepath, bbox_inches='tight')\n\tplt.close()","repo_name":"BjerknesClimateDataCentre/KPI","sub_path":"Script/quince_kpi/prop_plots.py","file_name":"prop_plots.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20413044758","text":"#!/usr/bin/env python3\n\"\"\"\nmodule for task 1\n\"\"\"\n\nimport numpy as np\n\n\ndef pool_forward(A_prev, kernel_shape, stride=(1, 1), mode='max'):\n \"\"\"\n forward propagation on pooling network\n \"\"\"\n m, h, w, c = A_prev.shape\n kh, kw = kernel_shape\n sh, sw = stride\n ph = ((h - kh) // sh) + 1\n pw = ((w - kw) // sw) + 1\n result = np.zeros((m, ph, pw, c))\n a = 0\n for x in range(0, (h - kh + 1), sh):\n b = 0\n for y in range(0, (w - kw + 1), sw):\n if mode == 'max':\n result[:, a, b, :] = np.max(\n A_prev[:, x:x + kh, y:y + kw, :], axis=(1, 2))\n elif mode == 'avg':\n result[:, a, b, :] = np.average(\n A_prev[:, x:x + kh, y:y + kw, :], axis=(1, 2))\n b = b + 1\n a = a + 1\n return result\n","repo_name":"not-notAlex/holbertonschool-machine_learning","sub_path":"supervised_learning/0x07-cnn/1-pool_forward.py","file_name":"1-pool_forward.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5716767863","text":"import csv\r\nimport sqlite3\r\n\r\n\r\nsqlite_file = 'ottawa.db'\r\n\r\nconn = sqlite3.connect(dbfile)\r\n\r\n\r\n# Get a cursor object\r\n\r\ncur = conn.cursor()\r\n\r\n\r\ndef unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):\r\n\r\n # csv.py doesn't do Unicode; encode temporarily as UTF-8:\r\n\r\n csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),\r\n\r\n dialect=dialect, **kwargs)\r\n\r\n for row in csv_reader:\r\n\r\n # decode UTF-8 back to Unicode, cell by cell:\r\n\r\n yield [unicode(cell, 'utf-8') for cell in row]\r\n\r\n\r\n\r\ndef utf_8_encoder(unicode_csv_data):\r\n\r\n for line in unicode_csv_data:\r\n\r\n yield line.encode('utf-8')\r\n\r\n \r\n\r\ndef UnicodeDictReader(utf8_data, **kwargs):\r\n\r\n csv_reader = csv.DictReader(utf8_data, **kwargs)\r\n\r\n for row in csv_reader:\r\n\r\n yield {key: value.encode('utf-8') for key, value in row.items()}\r\n\r\n\r\n# Create the table, specifying the column names and data types:\r\n\r\ncur.execute('''\r\n\r\n CREATE TABLE IF NOT EXISTS nodes_tags(id INTEGER, key TEXT, value TEXT,type TEXT)\r\n\r\n''')\r\n\r\ncur.execute('''\r\n\r\n CREATE TABLE IF NOT EXISTS nodes(id INTEGER, lat REAL, lon REAL, user TEXT, uid INTEGER, \r\n\r\n version INTEGER, changeset INTEGER, timestamp TIMESTAMP)\r\n\r\n''')\r\n\r\ncur.execute('''\r\n\r\n CREATE TABLE IF NOT EXISTS ways(id INTEGER, user TEXT, uid INTEGER, changeset INTEGER, timestamp TIMESTAMP)\r\n\r\n''')\r\n\r\ncur.execute('''\r\n\r\n CREATE TABLE IF NOT EXISTS ways_tags(id INTEGER, key TEXT, value TEXT, type TEXT) \r\n\r\n''')\r\n\r\ncur.execute('''\r\n\r\n CREATE TABLE IF NOT EXISTS ways_nodes(id INTEGER, node_id INTEGER, position INTEGER)\r\n\r\n''')\r\n\r\n\r\n\r\n# commit the changes\r\n\r\nconn.commit()\r\n\r\n\r\n\r\n# Read in the csv file as a dictionary, format the\r\n\r\n# data as a list of tuples:\r\n\r\nwith open('nodes_tags.csv','r') as fin:\r\n\r\n dr = UnicodeDictReader(fin) # comma is default delimiter\r\n\r\n to_db = [('id', 'key','value', 'type') for i in dr] \r\n\r\n\r\n\r\nwith open('nodes.csv', 'r') as fin2:\r\n\r\n dr2 = UnicodeDictReader(fin2)\r\n\r\n to_db2 = [('id','lat','lon','user','uid', 'version','changeset','timestamp') for i in dr2]\r\n\r\n \r\n\r\nwith open('ways.csv', 'r') as fin3:\r\n\r\n dr3 = UnicodeDictReader(fin3)\r\n\r\n to_db3 = [('id','user','uid','changeset','timestamp') for i in dr3]\r\n\r\n \r\n\r\nwith open('ways_tags.csv', 'r') as fin4:\r\n\r\n dr4 = UnicodeDictReader(fin4)\r\n\r\n to_db4 = [('id','key','value','type') for i in dr4] \r\n\r\n \r\n\r\nwith open('ways_nodes.csv', 'r') as fin5:\r\n\r\n dr5 = UnicodeDictReader(fin5)\r\n\r\n to_db5 = [('id','node_id','position') for i in dr5] \r\n\r\n \r\n\r\n # insert the formatted data\r\n\r\ncur.executemany(\"INSERT INTO nodes_tags(id, key, value,type) VALUES (?, ?, ?, ?);\", to_db)\r\n\r\ncur.executemany(\"INSERT INTO nodes(id, lat, lon, user, uid, version, changeset, timestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?);\", to_db2)\r\n\r\ncur.executemany(\"INSERT INTO ways(id, user, uid, changeset, timestamp) VALUES (?, ?, ?, ?, ?);\", to_db3)\r\n\r\ncur.executemany(\"INSERT INTO ways_tags(id, key, value, type) VALUES (?, ?, ?, ?);\", to_db4)\r\n\r\ncur.executemany(\"INSERT INTO ways_nodes(id, node_id, position) VALUES (?, ?, ?);\", to_db5)\r\n\r\n\r\n\r\n# commit the changes\r\n\r\nconn.commit()\r\nconn.close()\r\n\r\n","repo_name":"AlabdulhadiSara/Data-Analysis-Nanodegree","sub_path":"Project 
4/files/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"4645674361","text":"from db_utilities import DbUtiltity\n\nclass Brand:\n def __init__(self, p_brand_name='', p_models=[]):\n self.name = p_brand_name\n self.models = p_models\n \n def serialize(self):\n return {\n 'name': self.name,\n 'models': self.models\n }\n\ndef get_all_brands():\n _brands = []\n tmp_dict = {}\n with DbUtiltity() as db_util:\n\n result = db_util.find_all_brands()\n for rec in result:\n brand_name = rec['brand']\n model_name = rec['model']\n\n if brand_name in tmp_dict:\n tmp_dict[brand_name].append(model_name)\n else:\n tmp_dict[brand_name] = [model_name]\n \n _brands = [Brand(key, tmp_dict.get(key)) for key in tmp_dict]\n \n return _brands\n\ndef get_models_by_brand(brand_name):\n _models = []\n\n with DbUtiltity() as db_util:\n result = db_util.find_models_by_brand(brand_name)\n for rec in result:\n _models.append(rec['model'])\n \n return _models\n\ndef add_model_to_brand(brand, model):\n with DbUtiltity() as db_util:\n db_util.save_brand_model({\"brand\":brand, \"model\":model})\n","repo_name":"thipham-200467699/BDAT1007.Assignment1","sub_path":"brand.py","file_name":"brand.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8542548308","text":"import pathlib\nimport sys\n\nfolder_path = str(pathlib.Path(__file__).parent.absolute())\n\nsys.path.append(\".\")\nfrom check import Check\nfrom model import *\n\n\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n # ex. [\"\", \"l\", \"le\", \"lee\", \"leet\", \"leetc\", \"leetco\", \"leetcod\", \"leetcode\"]\n dp = [True] + [False] * len(s)\n\n for index in range(1, len(s) + 1):\n\n for word in wordDict:\n if dp[index - len(word)] and s[:index].endswith(word):\n dp[index] = True\n\n return dp[-1]\n\n\nif __name__ == \"__main__\":\n Check().run_code(solution=Solution(), test_data_path=f\"{folder_path}/test_data.txt\")\n","repo_name":"FawenYo/LeetCode","sub_path":"139. Word Break/word_break.py","file_name":"word_break.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11617519519","text":"import streamlit as st \nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nimport os.path\nfrom csv import writer\n\ndef calChargeOpt(wearValue, shellid, selectedCampaign, selectedScan):\n filenameDir = 'HUSAB/' + 'CAMPAIGN_' + selectedCampaign + '/SCAN-' + selectedScan.replace('-', '') + '/chargeOptions.csv'\n with open(filenameDir, 'r') as f:\n lines = f.readlines()\n chargeOptionList = []\n for line in lines:\n tmp = []\n for a in line.strip('\\n').split(','):\n tmp.append(round(float(a), 1))\n chargeOptionList.append(tmp)\n f.close()\n #chargeOptDF = pd.read_csv(filenameDir)\n wearmatrix = chargeOptionList\n # add rowid as an input\n rowid=shellid\n chargeOpt = wearmatrix[rowid]\n return chargeOpt\n\ndef calTrajImageName(shellid, chargeLevid, speed, selectedCampaign, selectedScan):\n shellid=shellid+1\n #chargeLevid=0 #replace this when receive full images\n filenameDir = 'HUSAB/' + 'CAMPAIGN_' + selectedCampaign + '/SCAN-' + selectedScan.replace('-', '')\n massid = int(50*chargeLevid+300)\n rpmid = speed.replace(\".\", \"p\")\n wear_image_name = filenameDir +'/shell'+ str(shellid)+'_mass'+str(massid)+'_'+rpmid+'.png'\n return wear_image_name\n\ndef calMaxRMP(wearValue, chargeLevid, selectedCampaign, selectedScan):\n shellid=wearValue\n filenameDir = 'HUSAB/' + 'CAMPAIGN_' + selectedCampaign + '/SCAN-' + selectedScan.replace('-', '') + '/maxRPM.csv'\n with open(filenameDir, 'r') as f:\n lines = f.readlines()\n RPMmatrix = []\n for line in lines:\n tmp = []\n for a in line.strip('\\n').split(','):\n tmp.append(float(a))\n RPMmatrix.append(tmp)\n f.close()\n #print(RPMmatrix)\n\n maxrpmVal=RPMmatrix[shellid][chargeLevid]\n imageName=calTrajImageName(wearValue, chargeLevid, str(maxrpmVal), selectedCampaign, selectedScan)\n\n return maxrpmVal, imageName\n\ndef calMinChargeLev(wearValue, speed, speedid, selectedCampaign, selectedScan):\n shellid=wearValue\n filenameDir = 'HUSAB/' + 'CAMPAIGN_' + selectedCampaign + '/SCAN-' + selectedScan.replace('-', '') + '/minCharge.csv'\n with open(filenameDir, 'r') as f:\n lines = f.readlines()\n Chargematrix = []\n for line in lines:\n tmp = []\n for a in line.strip('\\n').split(','):\n tmp.append(int(a))\n Chargematrix.append(tmp)\n f.close()\n\n minChargeVal=Chargematrix[shellid][speedid]\n\n if minChargeVal == 0:\n for i in range(0,len(Chargematrix[0])):\n if Chargematrix[shellid][i] != 0:\n ChargeVal = Chargematrix[shellid][i]\n safeRPMid=i\n break\n elif minChargeVal == -1:\n for i in range(-1,-1*(len(Chargematrix[0])+1),-1):\n if Chargematrix[shellid][i] != -1:\n ChargeVal = Chargematrix[shellid][i]\n safeRPMid = i+len(Chargematrix[0])\n break\n else:\n ChargeVal = minChargeVal\n safeRPMid = speedid\n\n chargeLevid=int((ChargeVal-300)/50)\n ChargeOpt = calChargeOpt(wearValue, shellid, selectedCampaign, selectedScan)\n ChargeLev = ChargeOpt[chargeLevid]\n #print(\"image input=\",wearValue, ChargeVal, chargeLevid, speed)\n chargeimageName=calTrajImageName(shellid, chargeLevid, str(speed), selectedCampaign, selectedScan) \n\n return minChargeVal, ChargeLev, safeRPMid, chargeimageName\n\n\ndef local_pvModel(file_name):\n st.markdown(\n f'',\n unsafe_allow_html=True,\n )\n\ndef GET_SCAN_DATES(campaign):\n scanDatesFile = 'HUSAB/' + \"CAMPAIGN_\" + campaign + '/scanDates.csv'\n scanDatesDF = pd.read_csv(scanDatesFile)\n relineDate = scanDatesDF[\"SCAN_DATES\"].iloc[-1]\n scanDates = scanDatesDF[\"SCAN_DATES\"].values\n campaignDir = 'HUSAB/' + \"CAMPAIGN_\" + campaign\n return relineDate, 
scanDates, campaignDir\n\n####################################### MAIN ################################\ndef app():\n # add sidebar elements\n st.sidebar.markdown(\"---\")\n #st.sidebar.markdown(\"Please Select Shell Liner Campaign\")\n st.session_state.selectCampaign = st.sidebar.radio(\n \"Please Select Shell Liner Campaigns\",\n [\"OCT22-APR23\", \"APR22-OCT22\"],\n )\n #st.session_state.selectCampaign\n st.header(\"Shell Wear Evolution\")\n st.markdown(\"---\")\n\n #wearEvo = '🎢 SHELL GEOMETRY EVOLUTION
'\n #st.markdown(wearEvo,unsafe_allow_html=True)\n shellEvlv = st.container()\n with shellEvlv:\n shellslider, shellprof = shellEvlv.columns([1,2])\n #st.session_state.wearInput=None\n #st.session_state.updateProfclicked =False\n #st.session_state.selectedWear = None\n relineDate, scanDates, campaignDir = GET_SCAN_DATES(st.session_state.selectCampaign)\n with shellslider:\n #st.markdown(relineDate)\n #st.markdown(scanDates)\n #st.markdown(scanDates[0])\n st.info(\"🚩 Shell liners from the current campaign was installed on \" + relineDate)\n st.session_state.wearvalue=0\n st.markdown(\"###\")\n st.session_state.modeSelect = st.radio(\n \"Please Select Shell Liner Profile:\",\n (scanDates))\n st.markdown(\"###\")\n\n # 2D\n st.session_state.selectWear_clicked = st.button(\"🚀 Select This Profile\")\n #try:\n if st.session_state.selectWear_clicked: \n st.session_state.selectedWear = st.session_state.wearvalue\n st.success(f'✅ ' + st.session_state.modeSelect + ' Shell profile is Sellected.')\n #except:\n # if selectWear_clicked:\n # st.session_state.selectWear_clicked=True\n # st.session_state.selectedWear = st.session_state.wearvalue\n # st.success(f'✅ Latest Shell profile is Sellected. Now you can move below for trajectory predictions.') \n\n\n with shellprof:\n #st.plotly_chart(wearProfPlot, use_container_width=False)\n #st.bokeh_chart(p, use_container_width=True)\n imName = campaignDir + '/shell2d.jpg'\n ##wearfilename = 'shellProfile2D/' + imName\n image = Image.open(imName)\n st.image(image, caption='Selected Shell Liner Profile')\n #else:\n # pvLINK = \"https://kycg.s3.ap-east-1.amazonaws.com/husab/SHELL-10OCT22.html\"\n # with st.spinner('Loading 3D Worn Shell ...'):\n # local_pvModel(pvLINK)\n\n #perfmPredic = '📈 SHELL PERFORMANCE PREDICTION
'\n #st.markdown(perfmPredic, unsafe_allow_html=True)\n st.header(\"Shell Trajectory Prediction\")\n st.markdown(\"---\")\n shellPerf = st.container()\n with shellPerf:\n shellOper, spacer, shellTraj = shellPerf.columns([2,1,2])\n with shellOper:\n direction = st.radio(\n '1. Please Select Mill Rotational Direction?',\n ('FE -> DE Counter-Clock-Wise', \n 'FE -> DE Clock-Wise'))\n st.markdown(\"###\")\n option = st.radio(\n '2. Please Select Which Performance to Predict?',\n ('Total Charge + Mill Speed -> Trajectory', \n 'Total Charge -> Maximum Mill Speed', \n 'Mill Speed -> Safe Total Charge level'))\n st.session_state.option = option\n st.session_state.direction = direction\n # define shell id\n if st.session_state.direction == 'FE -> DE Counter-Clock-Wise':\n st.session_state.shellid = 0 # worn as second row\n st.markdown(\"###\")\n st.success(\"✅ The latest shell profile scanned on \" + st.session_state.modeSelect + \" was utilised, please confirm the mill rotational direction!\")\n direction1 = Image.open(\"millDirection/FEDE_LH.png\")\n st.image(direction1)\n elif st.session_state.direction == 'FE -> DE Clock-Wise':\n st.session_state.shellid = 1 # new as first row\n st.markdown(\"###\")\n st.success(\"✅ The latest shell profile scanned on \" + st.session_state.modeSelect + \" was utilised, please confirm the mill rotational direction!\")\n direction2 = Image.open(\"millDirection/FEDE_RH.png\")\n st.image(direction2)\n st.markdown(\"###\")\n st.success('✅ You have selected: ' + direction + ' ' + option)\n st.markdown(\"-------------------------------------------\")\n #st.write(' ')\n \n if option == 'Total Charge + Mill Speed -> Trajectory':\n st.session_state.computeTraj_clicked= False\n st.session_state.trajImageName = None \n #st.markdown(st.session_state.selectedWear) \n try:\n if st.session_state.selectedWear is not None:\n # Total Charge Level input\n chargeOpt=calChargeOpt(st.session_state.selectedWear, st.session_state.shellid, st.session_state.selectCampaign, st.session_state.modeSelect) # add a row ID input\n #st.dataframe(chargeOpt)\n chargelevel = st.select_slider('Please Select Total Charge Level - %', \n options=chargeOpt, value=chargeOpt[int(len(chargeOpt)/2-1)])\n st.session_state.chargelevel = chargelevel\n st.session_state.chargeid = chargeOpt.index(chargelevel)\n # define ball charge level\n #ballOpt=np.around(np.linspace(start = 12.0, stop = 16.0, num = 5, endpoint = True), decimals=1)\n #balllevel = st.select_slider('Please Select Ball Charge Level - %', options=ballOpt, value=14.0)\n #st.session_state.balllevel = balllevel\n\n # Speed input\n speedOpt=np.around(np.linspace(start = 8.4, stop = 10.2, num = 19, endpoint = True), decimals=1)\n millSpeed = st.select_slider('Please Select Mill Speed - RPM', options=speedOpt, value=9.4)\n st.session_state.millSpeed = millSpeed\n # call associated results image\n trajImageName=calTrajImageName(st.session_state.shellid, st.session_state.chargeid, str(st.session_state.millSpeed), st.session_state.selectCampaign, st.session_state.modeSelect)\n #st.markdown(trajImageName)\n st.session_state.trajImageName=trajImageName\n\n\n st.markdown(\"-------------------------------------------\")\n computeTraj_clicked = st.button(\"🚀 Compute Trajectory\")\n if computeTraj_clicked:\n st.session_state.computeTraj_clicked= True \n except:\n #if st.session_state.computeTraj_clicked:\n # st.warning('Wear profile hasn\\'t selected!')\n pass\n\n elif option == 'Total Charge -> Maximum Mill Speed': \n st.session_state.computeMaxRPM_clicked = 
False\n st.session_state.rpmimagename = \" \" \n try:\n if st.session_state.selectedWear is not None: \n chargeOpt1=calChargeOpt(st.session_state.selectedWear, st.session_state.shellid, st.session_state.selectCampaign, st.session_state.modeSelect) # add a row ID input\n chargelevel1 = st.select_slider('Please Select Total Charge Level - %', options=chargeOpt1, value=chargeOpt1[int(len(chargeOpt1)/2-1)])\n st.session_state.chargelevel1 = chargelevel1\n st.session_state.chargeid1 = chargeOpt1.index(chargelevel1)\n # define ball charge level\n #ballOpt1=np.around(np.linspace(start = 12.0, stop = 16.0, num = 5, endpoint = True), decimals=1)\n #balllevel1 = st.select_slider('Please Select Ball Charge Level - %', options=ballOpt1, value=14.0)\n #st.session_state.balllevel1 = balllevel1\n\n # Speed input\n computeMaxRPM_clicked = st.button(\"🚀 Compute Maximum RPM\")\n if computeMaxRPM_clicked:\n st.session_state.computeMaxRPM_clicked = True\n #st.markdown(\"Reached 0\")\n maxRPM, rpmimagename = calMaxRMP(st.session_state.shellid, st.session_state.chargeid1, st.session_state.selectCampaign, st.session_state.modeSelect)\n #st.markdown(maxRPM)\n #st.markdown(rpmimagename)\n \n st.session_state.maxRPM=maxRPM\n st.session_state.rpmimagename = rpmimagename\n st.metric(\"Maximum RPM = \", value= maxRPM) \n except:\n pass\n \n elif option == 'Mill Speed -> Safe Total Charge level': \n st.session_state.computeMinCharge_clicked = False\n st.session_state.chargeimageName = \" \" \n try:\n if st.session_state.selectedWear is not None:\n speedOpt1=np.around(np.linspace(start = 8.4, stop = 10.2, num = 19, endpoint = True), decimals=1)\n millSpeed1 = st.select_slider('Please Select Mill Speed - RPM', options=speedOpt1, value=9.4)\n #st.metric('millSpeed1', millSpeed1)\n st.session_state.millSpeed1 = millSpeed1\n st.session_state.millSpeedid1 = speedOpt1.tolist().index(millSpeed1)\n\n computeMinCharge_clicked = st.button(\"🚀 Compute Min Charge Level\")\n if computeMinCharge_clicked:\n st.session_state.computeMinCharge_clicked = True\n #st.metric('reached here', 100)\n minChargeVal, ChargeLev, safeRPMid, chargeimageName =calMinChargeLev(st.session_state.shellid, st.session_state.millSpeed1, st.session_state.millSpeedid1, st.session_state.selectCampaign, st.session_state.modeSelect)\n st.session_state.minChargeVal = minChargeVal\n st.session_state.ChargeLev = ChargeLev\n st.session_state.chargeimageName = chargeimageName\n safeRPM = speedOpt1[safeRPMid]\n \n st.session_state.safeRPM = safeRPM \n if st.session_state.minChargeVal == 0:\n st.warning(f'Warning: At current shell profile, mill speed cannot be below {st.session_state.safeRPM :.2f} RPM, and the min charge level is {st.session_state.ChargeLev :.2f} %.')\n elif st.session_state.minChargeVal == -1:\n st.warning(f'Warning: At current shell profile, mill speed cannot be above {st.session_state.safeRPM :.2f} RPM, and the min charge level is {st.session_state.ChargeLev :.2f} %.')\n else:\n st.metric(\"Minimum Charge Level (%) = \", value= st.session_state.ChargeLev) \n except:\n pass\n with shellTraj:\n if st.session_state.option == 'Total Charge + Mill Speed -> Trajectory': \n if st.session_state.computeTraj_clicked: \n #print(\"show trajectory image\",st.session_state.trajImageName) \n if os.path.exists(st.session_state.trajImageName):\n trajImage = Image.open(st.session_state.trajImageName)\n st.image(trajImage, caption='Shell Trajectory', use_column_width=True)\n #else:\n # st.image(\"HUSAB/blank.png\", caption='Shell trajectory image doesn\\'t exist', 
use_column_width=True)\n\n elif st.session_state.option == 'Total Charge -> Maximum Mill Speed':\n if st.session_state.computeMaxRPM_clicked: \n if os.path.exists(st.session_state.rpmimagename):\n trajImage = Image.open(st.session_state.rpmimagename)\n st.image(trajImage, caption=f'Shell Trajectory at Maximum RPM = {st.session_state.maxRPM :.2f}', use_column_width=True)\n #else:\n # st.image(\"HUSAB/blank.png\", caption='Shell trajectory image at Maximum RPM doesn\\'t exist', use_column_width=True)\n elif st.session_state.option == 'Mill Speed -> Safe Total Charge level':\n if st.session_state.computeMinCharge_clicked: \n if os.path.exists(st.session_state.chargeimageName):\n trajImage = Image.open(st.session_state.chargeimageName)\n st.image(trajImage, caption=f'Shell Trajectory at Mininum Charge Level = {st.session_state.ChargeLev :.2f}', use_column_width=True)\n else:\n st.image(\"HUSAB/blank.png\", caption='Shell trajectory image at Mininum Charge Level doesn\\'t exist', use_column_width=True)\n \n \n st.markdown(\"-------------------------------------------\")\n with st.container():\n st.subheader(\"Warranties and Liability\")\n st.warning(\"\"\"\n 1. Bradken makes no other warranties of any kind in connection with Grindmaster, whether express or implied, and specifically disclaims any and all implied warranties for suitability, completeness, accuracy, or fitness for any particular purpose, to the maximum extent permitted by law. \\n\n 2. Bradken shall in no event be liable for any loss or liability incurred by User arising in connection with the use of Grindmaster.\n 3. Bradken shall in no event be liable to User for any Consequential Loss in connection with this Agreement.\n 4. User warrants and represents that it shall its use of Grindmaster shall be for lawful purposes, and that Grindmaster shall not be used to support any business activity contrary to any applicable laws including but not limited to laws related to:\n (i)\t sanctions and export control,\n (ii)\tbribery and corruption, and\n (iii)\tanti-competitive behaviour.\n\n \"\"\"\n )\n\n\n st.markdown(\"Visit us @ \")\n st.markdown(\"\"\"\n All company names, logos, product names, and identifying marks used throughout this website are the property of their respective trademark owners. They are used for descriptive purposes only and are protected by the relevant laws of the countries in which the trademarks are registered.\n \"\"\"\n )\n st.markdown(\"©️ 2022 Copyright Bradken\")\n\n\n hide_streamlit_style = \"\"\"\n \n \"\"\"\n st.markdown(hide_streamlit_style, unsafe_allow_html=True) ","repo_name":"oresome/OptiGrind-HUSAB","sub_path":"pages/grindMaster.py","file_name":"grindMaster.py","file_ext":"py","file_size_in_byte":19659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11315760765","text":"def findDisappearedNumbers(nums):\n \"\"\"\n Given an array of integers where 1 ≤ a[i] ≤ n (n = size of array),\n some elements appear twice and others appear once.\n\n Find all the elements of [1, n] inclusive that do not appear in this array.\n\n Could you do it without extra space and in O(n) runtime?\n You may assume the returned list does not count as extra space.\n\n >>> findDisappearedNumbers([4,3,2,7,8,2,3,1])\n [5, 6]\n\n \"\"\"\n res = []\n for i in range(1, len(nums) + 1):\n nums[abs(nums[i - 1]) - 1] = abs(nums[abs(nums[i - 1]) - 1]) * -1\n for i in range(len(nums)):\n if nums[i] > 0:\n res.append(i + 1)\n return res\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(verbose=True)","repo_name":"AlexVines/my_leetcode_solutions","sub_path":"find_dissappeared_nums.py","file_name":"find_dissappeared_nums.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33549371930","text":"import numpy as np\nimport cv2\nimport scipy.io\nimport argparse\nfrom tqdm import tqdm\nfrom os import listdir\nfrom os.path import isfile, join\nimport sys\nimport dlib\nfrom moviepy.editor import *\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"This script cleans-up noisy labels \"\n \"and creates database for training.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--output\", \"-o\", type=str,\n help=\"path to output database mat file\")\n parser.add_argument(\"--img_size\", type=int, default=64,\n help=\"output image size\")\n \n args = parser.parse_args()\n return args\n\n\ndef main():\n args = get_args()\n output_path = './data/megaage_test'\n #output_path = './data/megaage_train'\n img_size = args.img_size\n\n mypath = './megaage_asian/test'\n #mypath = './megaage_asian/train'\n isPlot = False\n\n age_file = np.loadtxt('./megaage_asian/list/test_age.txt')\n #age_file = np.loadtxt('./megaage_asian/list/train_age.txt')\n img_name_file = np.genfromtxt('./megaage_asian/list/test_name.txt',dtype='str')\n #img_name_file = np.genfromtxt('./megaage_asian/list/train_name.txt',dtype='str')\n out_ages = []\n out_imgs = []\n\n for i in tqdm(range(len(img_name_file))):\n \n input_img = cv2.imread(mypath+'/'+img_name_file[i])\n input_img = input_img[20:-20,:,:]\n img_h, img_w, _ = np.shape(input_img)\n age = int(float(age_file[i]))\n if age >= -1:\n\t if isPlot:\n\t\t img_clip = ImageClip(input_img)\n\t\t img_clip.show()\n\t\t key = cv2.waitKey(1000)\n\n\t input_img = cv2.resize(input_img,(img_size,img_size))\n\t #only add to the list when faces is detected\n\t out_imgs.append(input_img)\n\t out_ages.append(int(age))\n\n np.savez(output_path,image=np.array(out_imgs), age=np.array(out_ages), img_size=img_size)\n\nif __name__ == '__main__':\n main()\n","repo_name":"b02901145/SSR-Net_megaage-asian","sub_path":"TYY_Megaage_asian_create_db.py","file_name":"TYY_Megaage_asian_create_db.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"53"}
+{"seq_id":"18952380177","text":"#!/usr/bin/env python3\n# two functions here can be imported to python3 programs as well, not sure\n# about __main__ section\n\nfrom datetime import date, datetime\n\nfrom xml.etree.ElementTree import (\n ElementTree, Element, SubElement\n)\n\n(MAX_GIVEN_NAME, MAX_MIDDLE_NAME, MAX_FAMILY_NAME, MAX_TELEPHONE_NUM,\n MAX_POSTAL_CODE) = (50, 100, 50, 50, 20)\n\nCIRCULATION_BUSINESS_CONTEXT=\"Circulation_Info\"\n\nclass OCLCPersonaException(Exception): pass\n\ndef process_address(persona,\n streetAddressLine1,\n streetAddressLine2=None,\n cityOrLocality=None,\n stateOrProvince=None,\n postalCode=None,\n country=None):\n if postalCode != None and len(postalCode)>MAX_POSTAL_CODE:\n raise OCLCPersonaException(\"Postal code longer than %d characters '%s'\" % (\n MAX_POSTAL_CODE, postalCode ) )\n\n contactInfo_address = SubElement(persona, \"contactInfo\")\n postalAddress = SubElement(contactInfo_address, 'postalAddress')\n SubElement(postalAddress, 'streetAddressLine1').text=streetAddressLine1\n if streetAddressLine2!=None:\n SubElement(postalAddress, 'streetAddressLine2').text=streetAddressLine2\n if cityOrLocality!=None:\n SubElement(postalAddress, 'cityOrLocality').text=cityOrLocality\n if stateOrProvince!=None:\n SubElement(postalAddress, 'stateOrProvince').text=stateOrProvince\n if postalCode!=None:\n SubElement(postalAddress, 'postalCode').text=postalCode\n if country!=None:\n SubElement(postalAddress, 'country').text=country\n\ndef add_WMS_circulation_persona(\n oclc_personas,\n\n # mandatory\n institutionId,\n \n # these are not mandatory in the schema but are for WMS Circulation\n barcode,\n borrowerCategory,\n homeBranch,\n\n # optional, but must use idAtSource and sourceSystem together\n idAtSource=None,\n sourceSystem=None,\n oclcUserName=None,\n\n # at lease one of these must be used\n givenName=None,\n familyName=None,\n\n middleName=None,\n\n # a list of email addresses, the first will be considered primary\n # unless email_primary is set to False\n emailAddresses=None,\n \n phoneNumbers=None,\n streetAddressLine1=None,\n streetAddressLine2=None,\n cityOrLocality=None,\n stateOrProvince=None,\n postalCode=None,\n country=None,\n additionalAddresses=None,\n note=None,\n expiry=None,\n\n email_primary=True,\n email_label=None,\n\n customData1=None,\n customData2=None,\n customData3=None,\n customData4=None,\n customData1BizContext=CIRCULATION_BUSINESS_CONTEXT,\n customData2BizContext=CIRCULATION_BUSINESS_CONTEXT,\n customData3BizContext=CIRCULATION_BUSINESS_CONTEXT,\n customData4BizContext=CIRCULATION_BUSINESS_CONTEXT,\n **kargs\n):\n if not (givenName!=None or familyName!=None):\n raise OCLCPersonaException(\n \"at least one of givename or familyName must be used\")\n\n if (emailAddresses==None and phoneNumbers==None and\n streetAddressLine1==None):\n raise OCLCPersonaException(\n \"at least an email address, phone number, or \"\n \"street address should be included\")\n\n if not ( (isinstance(emailAddresses, list) and len(emailAddresses)>0 )\n or\n (isinstance(phoneNumbers, list) and len(phoneNumbers)>0 )\n or\n streetAddressLine1!=None ):\n raise OCLCPersonaException(\n \"at least one email address (list length 1), \"\n \"one phone number (list length 1) \"\n \"or one street address must be included\"\n )\n\n if givenName != None and len(givenName)>MAX_GIVEN_NAME:\n raise OCLCPersonaException(\n \"Given name longer than %d characters '%s'\" % (\n MAX_GIVEN_NAME, givenName ) )\n\n if familyName != None and len(familyName)>MAX_FAMILY_NAME:\n raise 
OCLCPersonaException(\"Family name longer than %d characters '%s'\" % (\n MAX_FAMILY_NAME, familyName) )\n\n if middleName != None and len(middleName)>MAX_MIDDLE_NAME:\n raise OCLCPersonaException(\"Middle name longer than %d characters '%s'\"\n % (MAX_MIDDLE_NAME, middleName) )\n\n if phoneNumbers !=None and any( len(num)>MAX_TELEPHONE_NUM\n for num in phoneNumbers ):\n raise OCLCPersonaException(\n \"Phone number longer than %d characters '%s'\" % (\n MAX_TELEPHONE_NUM, ' '.join(phoneNumbers) ) )\n \n persona = SubElement(\n oclc_personas, 'persona', attrib={\"institutionId\": institutionId})\n \n \n if idAtSource!=None:\n if sourceSystem==None:\n raise OCLCPersonaException(\n \"sourceSystem must be defined when using idAtSource\")\n correlationInfo = SubElement(persona, 'correlationInfo')\n SubElement(correlationInfo, 'sourceSystem').text=sourceSystem\n SubElement(correlationInfo, 'idAtSource').text=idAtSource\n\n\n if oclcUserName!=None:\n SubElement(persona, 'oclcUserName').text=oclcUserName\n\n if expiry!=None:\n if isinstance(expiry, datetime):\n pass\n elif isinstance(expiry, date):\n # convert to a datetime with midnight as the time of day\n expiry = datetime.combine(expiry, datetime.min.time())\n else:\n raise OCLCPersonaException(\"expiry must be a date or datetime\")\n SubElement(persona, 'oclcExpirationDate').text=expiry.isoformat()\n \n nameInfo = SubElement(persona, 'nameInfo')\n\n if givenName!=None:\n SubElement(nameInfo, 'givenName').text=givenName\n\n if middleName!=None:\n SubElement(nameInfo, 'middleName').text=middleName\n\n if familyName!=None:\n SubElement(nameInfo, 'familyName').text=familyName\n\n wmsCircPatronInfo = SubElement(persona, 'wmsCircPatronInfo')\n\n SubElement(wmsCircPatronInfo, 'barcode').text=barcode\n SubElement(wmsCircPatronInfo, 'borrowerCategory').text=borrowerCategory\n SubElement(wmsCircPatronInfo, 'homeBranch').text=homeBranch\n\n if emailAddresses!=None:\n for i, emailAddress in enumerate(emailAddresses):\n contactInfo_email = SubElement(persona, \"contactInfo\")\n email = SubElement(contactInfo_email, 'email')\n SubElement(email, 'emailAddress').text=emailAddress\n if email_primary:\n if i == 0: # first email address is primary\n SubElement(email, 'isPrimary').text='true'\n else:\n SubElement(email, 'isPrimary').text='false'\n if email_label!=None:\n SubElement(contactInfo_email, 'label').text=email_label\n\n if phoneNumbers!=None:\n for phoneNumber in phoneNumbers:\n contactInfo_phone = SubElement(persona, \"contactInfo\")\n Phone = SubElement(contactInfo_phone, 'phone')\n SubElement(Phone, 'number').text=phoneNumber\n\n\n if streetAddressLine1!=None:\n process_address(\n persona,\n streetAddressLine1,\n streetAddressLine2,\n cityOrLocality,\n stateOrProvince,\n postalCode,\n country)\n\n if None!=additionalAddresses:\n for addr in additionalAddresses:\n process_address(persona, **addr)\n\n \n if note!=None:\n note_element = SubElement(persona, 'note')\n SubElement(note_element, 'text').text=note\n\n for i, (custom_data, biz_context) in enumerate( (\n (customData1, customData1BizContext),\n (customData2, customData2BizContext),\n (customData3, customData3BizContext),\n (customData4, customData4BizContext), ),\n start=1):\n if custom_data != None:\n addinfo = SubElement(persona, 'additionalInfo')\n if biz_context != None:\n SubElement(addinfo, 'businessContext').text=biz_context\n SubElement(addinfo, 'key').text=(\"customdata%d\" % i)\n SubElement(addinfo, 'value').text=custom_data\n\ndef create_personas_element():\n return Element(\n 
'oclcPersonas',\n attrib={'xmlns':\"http://worldcat.org/xmlschemas/IDMPersonas-2.2\",\n 'xmlns:xsi':\"http://www.w3.org/2001/XMLSchema-instance\",\n 'xsi:schemaLocation':\n \"http://worldcat.org/xmlschemas/IDMPersonas-2.2 \"\n \"IDMPersonas-2.2.xsd\"\n } # attrib\n ) # Element\n\nif __name__ == \"__main__\":\n from sys import stdout\n\n oclc_personas = create_personas_element()\n testPatrons = (\n dict(\n institutionId='1234567',\n barcode='123456789',\n borrowerCategory='P',\n homeBranch='mainBranch',\n\n idAtSource='torvalds-l',\n sourceSystem='LDAP',\n \n givenName='Linus',\n familyName='Torvalds',\n\n emailAddresses=['linus@example.tld'],\n\n note='Famous, treat with kid gloves, penguins bite'\n ),\n\n dict(\n institutionId='1234567',\n barcode='123456789',\n borrowerCategory='P',\n homeBranch='mainBranch',\n\n idAtSource='john-c',\n sourceSystem='LDAP',\n \n givenName='John',\n\n phoneNumbers=['+12042222222', '+999-800-PIZZA-ZA'],\n\n note='Great pizza for a great price'\n ),\n\n dict(\n institutionId='1234567',\n barcode='123456789',\n borrowerCategory='P',\n homeBranch='mainBranch',\n\n oclcUserName='123456789',\n \n familyName='Ricardo',\n\n emailAddresses=['ricardo@example.tld', 'ric@new.tld'],\n phoneNumbers=['+999-800-12345-67'],\n\n streetAddressLine1='666 example st.',\n cityOrLocality='Beverly Hills',\n stateOrProvince='California',\n postalCode='90210',\n expiry=datetime(2018,1,1,13,0)\n ),\n\n dict(\n institutionId='1234567',\n barcode='123456789',\n borrowerCategory='P',\n homeBranch='mainBranch',\n\n givenName='Joe',\n familyName='DiMaggio',\n\n streetAddressLine1='123 example bay',\n streetAddressLine2='c/o Rosalia',\n cityOrLocality='Martinez',\n stateOrProvince='California',\n country='United States',\n note='Our nation turns its lonely eyes to you',\n expiry=date(2018,1,1),\n\n customData1=\"hello world\",\n ),\n \n \n ) # testPatrons tuple\n\n for testPatron in testPatrons:\n add_WMS_circulation_persona(oclc_personas, **testPatron)\n ET = ElementTree(oclc_personas)\n ET.write(stdout.buffer)\n","repo_name":"uwinnipeglibrarysystems/oclcpersonaxml","sub_path":"loadpersona.py","file_name":"loadpersona.py","file_ext":"py","file_size_in_byte":10964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"9228301876","text":"import argparse\n\nfrom sibyl.core import Sibyl\nfrom sibyl.db.preprocessing import prepare_database\nfrom sibyl.sample_applications import prepare_housing_application\nfrom sibyl.utils import read_config, setup_logging\n\n\ndef _run(args):\n config = read_config(\"./sibyl/config.yml\")\n sibyl = Sibyl(config, args.docker, args.dbhost, args.dbport, args.db)\n\n sibyl.run_server(args.env, args.port)\n\n\ndef _prepare_db(args):\n prepare_database(args.config, args.dir)\n\n\ndef _prepare_housing_db(args):\n prepare_housing_application.run()\n prepare_database(\"./sibyl/sample_applications/housing_config.yml\")\n\n\ndef get_parser():\n # Common Parent - Shared options\n common = argparse.ArgumentParser(add_help=False)\n\n common.add_argument(\"-l\", \"--logfile\", help=\"Name of the logfile.If not given, log to stdout.\")\n\n common.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Be verbose. Use -vv for increased verbosity.\",\n )\n\n common.add_argument(\"--docker\", action=\"store_true\", help=\"Deploy in docker environment\")\n\n parser = argparse.ArgumentParser(description=\"Sibyl Command Line Interface.\")\n parser.set_defaults(function=None)\n\n # sibyl [action]\n action = parser.add_subparsers(title=\"action\", dest=\"action\")\n action.required = True\n\n # sibyl run\n run = action.add_parser(\"run\", help=\"Start flask server\", parents=[common])\n run.set_defaults(function=_run)\n\n run.add_argument(\"-P\", \"--port\", type=int, help=\"Flask server port\")\n run.add_argument(\n \"-E\",\n \"--env\",\n type=str,\n help=\"Flask environment\",\n choices=[\"development\", \"production\", \"test\"],\n )\n run.add_argument(\n \"--dbhost\",\n action=\"store\",\n help=\"Host address to access database. Overrides config\",\n type=str,\n )\n run.add_argument(\n \"--dbport\", action=\"store\", help=\"Port to access database. Overrides config\", type=int\n )\n run.add_argument(\n \"-D\", \"--db\", action=\"store\", help=\"Database name to use. Overrides config\", type=str\n )\n\n # sibyl prepare-db\n prepare_db = action.add_parser(\n \"prepare-db\", help=\"Prepare database from config\", parents=[common]\n )\n prepare_db.set_defaults(function=_prepare_db)\n\n prepare_db.add_argument(\"config\", action=\"store\", help=\"Path to config file to use\")\n prepare_db.add_argument(\n \"--dir\", \"--directory\", action=\"store\", help=\"Path of directory containing data\"\n )\n\n # sibyl prepare-sample-db\n prepare_sample_db = action.add_parser(\n \"prepare-sample-db\", help=\"Prepare sample database (housing)\", parents=[common]\n )\n prepare_sample_db.set_defaults(function=_prepare_housing_db)\n\n return parser\n\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n\n setup_logging(args.verbose, args.logfile)\n\n args.function(args)\n","repo_name":"sibyl-dev/sibyl-api","sub_path":"sibyl/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"75388822248","text":"#!/usr/bin/env python3\n# Keith Briggs 2017-08-02\n\nfrom math import cos,pi\nimport numpy as np\nfrom itertools import combinations\nfrom scipy.linalg import eigvalsh\n\nnp.set_printoptions(linewidth=200,suppress=True)\n\ndef complete_multipartite(p=(3,3,3,)):\n n=sum(p)\n A=np.zeros((n,n))\n i,spans=0,[]\n for pi in p:\n spans.append((i,i+pi))\n i+=pi\n print('spans=',spans)\n for c0,c1 in combinations(spans,2):\n for i in range(*c0):\n for j in range(*c1):\n A[i,j]=A[j,i]=1\n return A\n\nif __name__=='__main__':\n A=complete_multipartite()\n print(A)\n ev=eigvalsh(A)\n print(ev)\n","repo_name":"jwgwalton/cvxgraph_public","sub_path":"cvxgraph/multipartite_graphs/generate_multipartite_graphs.py","file_name":"generate_multipartite_graphs.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24996073082","text":"\n'''\nValidate that the class data is formatted as expected and complete\n'''\ndef check_format(classDict):\n\n\tfor clazz in classDict.values():\n\t\tif not 'prerequisites' in clazz:\n\t\t\treturn 0\n\t\tif not 'name' in clazz:\n\t\t\treturn 0\n\treturn 1\n\n'''\nValidate that all listed prerequisites exist\n'''\ndef check_existence(classDict):\n\t\n\tfor clazz in classDict.values():\n\t\tfor prereq in clazz['prerequisites']:\n\t\t\tif not prereq in classDict:\n\t\t\t\treturn 0\n\treturn 1\n\n'''\nValidate that there are no first order cyclical prerequisite relationships\n'''\ndef check_cycles(classDict):\n\n\tfor clazz in classDict.values():\n\t\tfor prereq in clazz['prerequisites']:\n\t\t\tif clazz['name'] in classDict[prereq]['prerequisites']:\n\t\t\t\treturn 0\n\treturn 1\n","repo_name":"morgansierrasnyder/snyders-class-scheduler","sub_path":"src/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32376246835","text":"\"\"\"\nhttps://app.codility.com/demo/results/trainingJRUT4G-Y9R/\n\nA small frog wants to get to the other side of a river. The frog is initially located at one bank of the river (position -1)\nand wants to get to the other bank (position N). The frog can jump over any distance F(K), where F(K) is the K-th Fibonacci \nnumber. Luckily, there are many leaves on the river, and the frog can jump between the leaves, but only in the direction of \nthe bank at position N.\n\nThe leaves on the river are represented in an array A consisting of N integers. Consecutive elements of array A represent \nconsecutive positions from 0 to N - 1 on the river. Array A contains only 0s and/or 1s:\n\n0 represents a position without a leaf;\n1 represents a position containing a leaf.\n\nThe goal is to count the minimum number of jumps in which the frog can get to the other side of the river \n(from position -1 to position N). The frog can jump between positions -1 and N (the banks of the river) \nand every position containing a leaf.\n\nFor example, consider array A=[0,0,0,1,1,0,1,0,0,0,0].\nThe frog can make three jumps of length F(5) = 5, F(3) = 2 and F(5) = 5.\n\n\"\"\"\n\n\ndef solution(A):\n A.append(1) # Append 1 to mark the other side of the river\n N = len(A)\n\n # Generate Fibonacci numbers up to N\n fib = [1, 1]\n while fib[-1] + fib[-2] <= N:\n fib.append(fib[-1] + fib[-2])\n\n # Create an array to track reachable positions\n reachable = [-1] * N\n for jump in fib:\n reachable[jump - 1] = 1\n\n # Iterate over each position in A\n for i, val in enumerate(A):\n if reachable[i] > 0 and val == 1:\n # Try different jump sizes from Fibonacci sequence\n for jump in fib:\n if jump + i >= N: # Check if jumping beyond the river's length\n break\n else:\n # Update the minimum number of jumps required to reach the current position\n if reachable[i + jump] < 0 or reachable[i + jump] > reachable[i] + 1:\n reachable[i + jump] = reachable[i] + 1\n\n return reachable[-1] \n\nA=[0,0,0,1,1,0,1,0,0,0,0]\nprint(solution(A))","repo_name":"ssuzana/codility-practice","sub_path":"13-fibonacci-numbers/FibFrog.py","file_name":"FibFrog.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"44572472755","text":"from django.db import models\n\nclass ShowManager(models.Manager):\n def validator(self, post_data):\n errors = {}\n #all errors are strings\n if len(post_data['title']) < 2:\n errors['title'] = \"TV Show title must be longer than 1 character\"\n if len(post_data['network']) < 2:\n errors['network'] = \"Network name must be longer than 1 character\"\n if not post_data['release_date']:\n errors['release_date'] = \"Must select Release Date\"\n if len(post_data['description']) < 2:\n errors['description'] = \"Description cannot be empty\"\n return errors\n\n\nclass Show(models.Model):\n title = models.CharField(max_length=100)\n network = models.CharField(max_length=100)\n release_date = models.DateTimeField(null = True)\n desc = models.CharField(max_length=250, default= \"random show\")\n objects = ShowManager()\n\n created_at=models.DateTimeField(auto_now_add=True)\n updated_at=models.DateTimeField(auto_now=True)\n\n def __str__(self): \n return f\"< {self.title} ({self.id})>\"\n\n\n","repo_name":"simonvantulder/Python_FullStack_Django","sub_path":"django_fullstack/semi_restful/restful_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17968238261","text":"import torch\nfrom torchvision import datasets, transforms\n\ndata_dir = 'Cat_Dog_data/train'\ntransform = transforms.Compose([transforms.Resize(225),\n transforms.CenterCrop(224),\n transforms.ToTensor()])\n\ndataset = datasets.ImageFolder(data_dir, transform=transform)\ndata_loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)\n\n\n# Data Augmentation\ntrain_transform = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor()])\n\ntest_transform = transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor()])\n\ntrain_data = datasets.ImageFolder(data_dir + '/train', transform=train_transform)\ntest_data = datasets.ImageFolder(data_dir + '/test', transform=test_transform)\n\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=32)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=32)\n\ndata_iter = iter(train_loader)\n","repo_name":"gagansingh894/Deep-Learning-with-PyTorch","sub_path":"load_image_data.py","file_name":"load_image_data.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11729348268","text":"from django.contrib import admin\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Sum\n\nfrom .models import Fund, Commitment, Call, Drawdown\n\nclass FundAdmin(admin.ModelAdmin):\n list_display=[\n \"id\",\n \"name\",\n \n \n ]\n list_display_links=[\n \"id\",\n \"name\",\n\n ]\n\n\nclass CommitmentAdmin(admin.ModelAdmin):\n list_display=[\n \"id\",\n \"fund\",\n \"date\",\n \"amount\",\n \"undrawn\",\n ]\n list_display_links=[\n \"id\",\n \"fund\",\n ]\n\n\n \n\nclass DrawdownAdmin(admin.ModelAdmin):\n\n # def get_call(self,obj):\n # return obj.call.amount\n\n list_display=[\n \"id\",\n \"call\",\n \"commitment\",\n \"date\",\n \"amount\",\n ]\n \n \n\n list_display_links=[\n \"id\",\n \"call\",\n ]\n\n\n\n\nclass CallAdmin(admin.ModelAdmin):\n\n\n def allocated(self,obj):\n total=Drawdown.objects.filter(call=obj).aggregate(Sum(\"amount\"))\n return total['amount__sum']\n\n\n list_display=[\n \"id\",\n \"date\",\n \"amount\",\n \"allocated\",\n \n ]\n list_display_links=[\n \"id\",\n \"amount\",\n\n ]\n\n\n# Register your models here.\nadmin.site.site_header = 'Capital Calls Administration'\n\n\nadmin.site.register(Fund,FundAdmin)\nadmin.site.register(Commitment,CommitmentAdmin)\nadmin.site.register(Call,CallAdmin)\nadmin.site.register(Drawdown,DrawdownAdmin)\n\nadmin.site.unregister(Group)\n\n\n\n","repo_name":"DevelopwithTom/Capital-Calls","sub_path":"val_backend/val_capital/core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23572479347","text":"import json\nimport pathlib\nimport socket\nimport traceback\nfrom ast import literal_eval\nfrom base64 import b64decode, b64encode\nfrom os import environ, path\nfrom pprint import pprint\nfrom subprocess import Popen\nfrom sys import executable\n\nimport click\nimport yaml\nfrom tabulate import tabulate\n\nimport mlrun\n\nfrom .builder import upload_tarball\nfrom .config import config as mlconf\nfrom .db import get_run_db\nfrom .k8s_utils import K8sHelper\nfrom .model import RunTemplate\nfrom .platforms import auto_mount as auto_mount_modifier\nfrom .projects import load_project\nfrom .run import get_object, import_function, import_function_to_dict, new_function\nfrom .runtimes import RemoteRuntime, RunError, RuntimeKinds, ServingRuntime\nfrom .secrets import SecretsStore\nfrom .utils import (\n dict_to_yaml,\n get_in,\n list2dict,\n logger,\n parse_versioned_object_uri,\n run_keys,\n update_in,\n)\nfrom .utils.version import Version\n\n\n@click.group()\ndef main():\n pass\n\n\n@main.command(context_settings=dict(ignore_unknown_options=True))\n@click.argument(\"url\", type=str, required=False)\n@click.option(\n \"--param\",\n \"-p\",\n default=\"\",\n multiple=True,\n help=\"parameter name and value tuples, e.g. -p x=37 -p y='text'\",\n)\n@click.option(\"--inputs\", \"-i\", multiple=True, help=\"input artifact\")\n@click.option(\"--outputs\", \"-o\", multiple=True, help=\"output artifact/result for kfp\")\n@click.option(\"--in-path\", help=\"default input path/url (prefix) for artifact\")\n@click.option(\"--out-path\", help=\"default output path/url (prefix) for artifact\")\n@click.option(\n \"--secrets\", \"-s\", multiple=True, help=\"secrets file= or env=ENV_KEY1,..\"\n)\n@click.option(\"--uid\", help=\"unique run ID\")\n@click.option(\"--name\", help=\"run name\")\n@click.option(\"--workflow\", help=\"workflow name/id\")\n@click.option(\"--project\", help=\"project name/id\")\n@click.option(\"--db\", default=\"\", help=\"save run results to path or DB url\")\n@click.option(\n \"--runtime\", \"-r\", default=\"\", help=\"function spec dict, for pipeline usage\"\n)\n@click.option(\n \"--kfp\", is_flag=True, help=\"running inside Kubeflow Piplines, do not use\"\n)\n@click.option(\n \"--hyperparam\",\n \"-x\",\n default=\"\",\n multiple=True,\n help=\"hyper parameters (will expand to multiple tasks) e.g. --hyperparam p2=[1,2,3]\",\n)\n@click.option(\n \"--param-file\", default=\"\", help=\"path to csv table of execution (hyper) params\"\n)\n@click.option(\n \"--selector\",\n default=\"\",\n help=\"how to select the best result from a list, e.g. 
max.accuracy\",\n)\n@click.option(\n \"--hyper-param-strategy\",\n default=\"\",\n help=\"hyperparam tuning strategy list | grid | random\",\n)\n@click.option(\n \"--hyper-param-options\", default=\"\", help=\"hyperparam options json string\",\n)\n@click.option(\n \"--func-url\",\n \"-f\",\n default=\"\",\n help=\"path/url of function yaml or function \" \"yaml or db:///[:tag]\",\n)\n@click.option(\"--task\", default=\"\", help=\"path/url to task yaml\")\n@click.option(\n \"--handler\", default=\"\", help=\"invoke function handler inside the code file\"\n)\n@click.option(\"--mode\", help=\"special run mode ('pass' for using the command as is)\")\n@click.option(\"--schedule\", help=\"cron schedule\")\n@click.option(\"--from-env\", is_flag=True, help=\"read the spec from the env var\")\n@click.option(\"--dump\", is_flag=True, help=\"dump run results as YAML\")\n@click.option(\"--image\", default=\"\", help=\"container image\")\n@click.option(\"--kind\", default=\"\", help=\"serverless runtime kind\")\n@click.option(\"--source\", default=\"\", help=\"source code archive/git\")\n@click.option(\"--local\", is_flag=True, help=\"run the task locally (ignore runtime)\")\n@click.option(\n \"--auto-mount\", is_flag=True, help=\"add volume mount to job using auto mount option\"\n)\n@click.option(\"--workdir\", default=\"\", help=\"run working directory\")\n@click.option(\"--origin-file\", default=\"\", help=\"for internal use\")\n@click.option(\"--label\", multiple=True, help=\"run labels (key=val)\")\n@click.option(\"--watch\", \"-w\", is_flag=True, help=\"watch/tail run log\")\n@click.option(\"--verbose\", is_flag=True, help=\"verbose log\")\n@click.option(\n \"--scrape-metrics\",\n is_flag=True,\n help=\"whether to add the `mlrun/scrape-metrics` label to this run's resources\",\n)\n@click.argument(\"run_args\", nargs=-1, type=click.UNPROCESSED)\ndef run(\n url,\n param,\n inputs,\n outputs,\n in_path,\n out_path,\n secrets,\n uid,\n name,\n workflow,\n project,\n db,\n runtime,\n kfp,\n hyperparam,\n param_file,\n selector,\n hyper_param_strategy,\n hyper_param_options,\n func_url,\n task,\n handler,\n mode,\n schedule,\n from_env,\n dump,\n image,\n kind,\n source,\n local,\n auto_mount,\n workdir,\n origin_file,\n label,\n watch,\n verbose,\n scrape_metrics,\n run_args,\n):\n \"\"\"Execute a task and inject parameters.\"\"\"\n\n out_path = out_path or environ.get(\"MLRUN_ARTIFACT_PATH\")\n config = environ.get(\"MLRUN_EXEC_CONFIG\")\n if from_env and config:\n config = json.loads(config)\n runobj = RunTemplate.from_dict(config)\n elif task:\n obj = get_object(task)\n task = yaml.load(obj, Loader=yaml.FullLoader)\n runobj = RunTemplate.from_dict(task)\n else:\n runobj = RunTemplate()\n\n set_item(runobj.metadata, uid, \"uid\")\n set_item(runobj.metadata, name, \"name\")\n set_item(runobj.metadata, project, \"project\")\n\n if label:\n label_dict = list2dict(label)\n for k, v in label_dict.items():\n runobj.metadata.labels[k] = v\n\n if workflow:\n runobj.metadata.labels[\"workflow\"] = workflow\n runobj.metadata.labels[\"mlrun/runner-pod\"] = socket.gethostname()\n\n if db:\n mlconf.dbpath = db\n\n # remove potential quotes from command\n eval_url = py_eval(url)\n url = eval_url if isinstance(eval_url, str) else url\n url_file = url\n url_args = \"\"\n if url:\n split = url.split(maxsplit=1)\n url_file = split[0]\n if len(split) > 1:\n url_args = split[1]\n\n if func_url or kind or image:\n if func_url:\n runtime = func_url_to_runtime(func_url)\n kind = get_in(runtime, \"kind\", kind or 
\"job\")\n if runtime is None:\n exit(1)\n else:\n kind = kind or \"job\"\n runtime = {\"kind\": kind, \"spec\": {\"image\": image}}\n\n if kind not in [\"\", \"local\", \"dask\"] and url:\n if url_file and path.isfile(url_file):\n with open(url_file) as fp:\n body = fp.read()\n based = b64encode(body.encode(\"utf-8\")).decode(\"utf-8\")\n logger.info(f\"packing code at {url_file}\")\n update_in(runtime, \"spec.build.functionSourceCode\", based)\n url = f\"main{pathlib.Path(url_file).suffix} {url_args}\"\n update_in(runtime, \"spec.build.code_origin\", url_file)\n elif runtime:\n runtime = py_eval(runtime)\n if not isinstance(runtime, dict):\n print(f\"runtime parameter must be a dict, not {type(runtime)}\")\n exit(1)\n else:\n runtime = {}\n\n code = environ.get(\"MLRUN_EXEC_CODE\")\n if get_in(runtime, \"kind\", \"\") == \"dask\":\n code = get_in(runtime, \"spec.build.functionSourceCode\", code)\n if from_env and code:\n code = b64decode(code).decode(\"utf-8\")\n origin_file = pathlib.Path(\n get_in(runtime, \"spec.build.origin_filename\", origin_file)\n )\n if kfp:\n print(f\"code:\\n{code}\\n\")\n suffix = pathlib.Path(url_file).suffix if url else \".py\"\n if suffix != \".py\" and mode != \"pass\" and url_file != \"{codefile}\":\n print(\n f\"command/url ({url}) must specify a .py file when not in 'pass' mode\"\n )\n exit(1)\n if mode == \"pass\":\n if \"{codefile}\" in url:\n url_file = origin_file.name or \"codefile\"\n url = url.replace(\"{codefile}\", url_file)\n elif suffix == \".sh\" or origin_file.suffix == \".sh\":\n url_file = origin_file.name or \"codefile.sh\"\n url = f\"bash {url_file} {url_args}\".strip()\n else:\n print(\n \"error, command must be specified with '{codefile}' in it \"\n \"(to determine the position of the code file)\"\n )\n exit(1)\n else:\n url_file = \"main.py\"\n if origin_file.name:\n url_file = origin_file.stem + \".py\"\n url = f\"{url_file} {url_args}\".strip()\n with open(url_file, \"w\") as fp:\n fp.write(code)\n\n if url:\n if not name and not runtime:\n name = path.splitext(path.basename(url))[0]\n runobj.metadata.name = runobj.metadata.name or name\n update_in(runtime, \"spec.command\", url)\n\n if run_args:\n update_in(runtime, \"spec.args\", list(run_args))\n if image:\n update_in(runtime, \"spec.image\", image)\n set_item(runobj.spec, handler, \"handler\")\n set_item(runobj.spec, param, \"parameters\", fill_params(param))\n\n set_item(runobj.spec, hyperparam, \"hyperparams\", fill_params(hyperparam))\n if hyper_param_options:\n runobj.spec.hyper_param_options = py_eval(hyper_param_options)\n set_item(runobj.spec.hyper_param_options, param_file, \"param_file\")\n set_item(runobj.spec.hyper_param_options, hyper_param_strategy, \"strategy\")\n set_item(runobj.spec.hyper_param_options, selector, \"selector\")\n\n set_item(runobj.spec, inputs, run_keys.inputs, list2dict(inputs))\n set_item(runobj.spec, in_path, run_keys.input_path)\n set_item(runobj.spec, out_path, run_keys.output_path)\n set_item(runobj.spec, outputs, run_keys.outputs, list(outputs))\n set_item(\n runobj.spec, secrets, run_keys.secrets, line2keylist(secrets, \"kind\", \"source\")\n )\n set_item(runobj.spec, verbose, \"verbose\")\n set_item(runobj.spec, scrape_metrics, \"scrape_metrics\")\n update_in(runtime, \"metadata.name\", name, replace=False)\n update_in(runtime, \"metadata.project\", project, replace=False)\n\n if kfp or runobj.spec.verbose or verbose:\n print(f\"MLRun version: {str(Version().get())}\")\n print(\"Runtime:\")\n pprint(runtime)\n print(\"Run:\")\n 
pprint(runobj.to_dict())\n\n try:\n fn = new_function(runtime=runtime, kfp=kfp, mode=mode, source=source)\n if workdir:\n fn.spec.workdir = workdir\n if auto_mount:\n fn.apply(auto_mount_modifier())\n fn.is_child = from_env and not kfp\n resp = fn.run(runobj, watch=watch, schedule=schedule, local=local)\n if resp and dump:\n print(resp.to_yaml())\n except RunError as err:\n print(f\"runtime error: {err}\")\n exit(1)\n\n\n@main.command(context_settings=dict(ignore_unknown_options=True))\n@click.argument(\"func_url\", type=str, required=False)\n@click.option(\"--name\", help=\"function name\")\n@click.option(\"--project\", help=\"project name\")\n@click.option(\"--tag\", default=\"\", help=\"function tag\")\n@click.option(\"--image\", \"-i\", help=\"target image path\")\n@click.option(\n \"--source\", \"-s\", default=\"\", help=\"location/url of the source files dir/tar\"\n)\n@click.option(\"--base-image\", \"-b\", help=\"base docker image\")\n@click.option(\n \"--command\",\n \"-c\",\n default=\"\",\n multiple=True,\n help=\"build commands, e.g. '-c pip install pandas'\",\n)\n@click.option(\"--secret-name\", default=\"\", help=\"container registry secret name\")\n@click.option(\"--archive\", \"-a\", default=\"\", help=\"destination archive for code (tar)\")\n@click.option(\"--silent\", is_flag=True, help=\"do not show build logs\")\n@click.option(\"--with-mlrun\", is_flag=True, help=\"add MLRun package\")\n@click.option(\"--db\", default=\"\", help=\"save run results to path or DB url\")\n@click.option(\n \"--runtime\", \"-r\", default=\"\", help=\"function spec dict, for pipeline usage\"\n)\n@click.option(\n \"--kfp\", is_flag=True, help=\"running inside Kubeflow Piplines, do not use\"\n)\n@click.option(\"--skip\", is_flag=True, help=\"skip if already deployed\")\ndef build(\n func_url,\n name,\n project,\n tag,\n image,\n source,\n base_image,\n command,\n secret_name,\n archive,\n silent,\n with_mlrun,\n db,\n runtime,\n kfp,\n skip,\n):\n \"\"\"Build a container image from code and requirements.\"\"\"\n\n if db:\n mlconf.dbpath = db\n\n if runtime:\n runtime = py_eval(runtime)\n if not isinstance(runtime, dict):\n print(f\"runtime parameter must be a dict, not {type(runtime)}\")\n exit(1)\n if kfp:\n print(\"Runtime:\")\n pprint(runtime)\n func = new_function(runtime=runtime)\n elif func_url.startswith(\"db://\"):\n func_url = func_url[5:]\n func = import_function(func_url)\n elif func_url:\n func_url = \"function.yaml\" if func_url == \".\" else func_url\n func = import_function(func_url)\n else:\n print(\"please specify the function path or url\")\n exit(1)\n\n meta = func.metadata\n meta.project = project or meta.project or mlconf.default_project\n meta.name = name or meta.name\n meta.tag = tag or meta.tag\n\n b = func.spec.build\n if func.kind not in [\"\", \"local\"]:\n b.base_image = base_image or b.base_image\n b.commands = list(command) or b.commands\n b.image = image or b.image\n b.secret = secret_name or b.secret\n\n if source.endswith(\".py\"):\n if not path.isfile(source):\n print(f\"source file doesnt exist ({source})\")\n exit(1)\n with open(source) as fp:\n body = fp.read()\n based = b64encode(body.encode(\"utf-8\")).decode(\"utf-8\")\n logger.info(f\"packing code at {source}\")\n b.functionSourceCode = based\n func.spec.command = \"\"\n else:\n b.source = source or b.source\n # todo: upload stuff\n\n archive = archive or mlconf.default_archive\n if archive:\n src = b.source or \"./\"\n logger.info(f\"uploading data from {src} to {archive}\")\n target = archive if 
archive.endswith(\"/\") else archive + \"/\"\n target += f\"src-{meta.project}-{meta.name}-{meta.tag or 'latest'}.tar.gz\"\n upload_tarball(src, target)\n # todo: replace function.yaml inside the tar\n b.source = target\n\n if hasattr(func, \"deploy\"):\n logger.info(\"remote deployment started\")\n try:\n func.deploy(\n with_mlrun=with_mlrun, watch=not silent, is_kfp=kfp, skip_deployed=skip\n )\n except Exception as err:\n print(f\"deploy error, {err}\")\n exit(1)\n\n state = func.status.state\n image = func.spec.image\n if kfp:\n with open(\"/tmp/state\", \"w\") as fp:\n fp.write(state or \"none\")\n full_image = func.full_image_path(image) or \"\"\n with open(\"/tmp/image\", \"w\") as fp:\n fp.write(image)\n with open(\"/tmp/fullimage\", \"w\") as fp:\n fp.write(full_image)\n print(\"full image path = \", full_image)\n\n print(f\"function built, state={state} image={image}\")\n else:\n print(\"function does not have a deploy() method\")\n exit(1)\n\n\n@main.command(context_settings=dict(ignore_unknown_options=True))\n@click.argument(\"spec\", type=str, required=False)\n@click.option(\"--source\", \"-s\", default=\"\", help=\"location/url of the source\")\n@click.option(\n \"--func-url\",\n \"-f\",\n default=\"\",\n help=\"path/url of function yaml or function \" \"yaml or db:///[:tag]\",\n)\n@click.option(\"--dashboard\", \"-d\", default=\"\", help=\"nuclio dashboard url\")\n@click.option(\"--project\", \"-p\", default=\"\", help=\"project name\")\n@click.option(\"--model\", \"-m\", multiple=True, help=\"model name and path (name=path)\")\n@click.option(\"--kind\", \"-k\", default=None, help=\"runtime sub kind\")\n@click.option(\"--tag\", default=\"\", help=\"version tag\")\n@click.option(\"--env\", \"-e\", multiple=True, help=\"environment variables\")\n@click.option(\"--verbose\", is_flag=True, help=\"verbose log\")\ndef deploy(spec, source, func_url, dashboard, project, model, tag, kind, env, verbose):\n \"\"\"Deploy model or function\"\"\"\n if func_url:\n runtime = func_url_to_runtime(func_url)\n if runtime is None:\n exit(1)\n elif spec:\n runtime = py_eval(spec)\n else:\n runtime = {}\n if not isinstance(runtime, dict):\n print(f\"runtime parameter must be a dict, not {type(runtime)}\")\n exit(1)\n\n if verbose:\n pprint(runtime)\n pprint(model)\n\n # support both v1 & v2+ model struct for backwards compatibility\n if runtime and runtime[\"kind\"] == RuntimeKinds.serving:\n print(\"Deploying V2 model server\")\n function = ServingRuntime.from_dict(runtime)\n if model:\n # v2+ model struct (list of json obj)\n for _model in model:\n args = json.loads(_model)\n function.add_model(**args)\n else:\n function = RemoteRuntime.from_dict(runtime)\n if kind:\n function.spec.function_kind = kind\n if model:\n # v1 model struct (list of k=v)\n models = list2dict(model)\n for k, v in models.items():\n function.add_model(k, v)\n\n function.spec.source = source\n if env:\n for k, v in list2dict(env).items():\n function.set_env(k, v)\n function.verbose = verbose\n\n try:\n addr = function.deploy(dashboard=dashboard, project=project, tag=tag)\n except Exception as err:\n print(f\"deploy error: {err}\")\n exit(1)\n\n print(f\"function deployed, address={addr}\")\n with open(\"/tmp/output\", \"w\") as fp:\n fp.write(addr)\n with open(\"/tmp/name\", \"w\") as fp:\n fp.write(function.status.nuclio_name)\n\n\n@main.command(context_settings=dict(ignore_unknown_options=True))\n@click.argument(\"pod\", type=str)\n@click.option(\"--namespace\", \"-n\", help=\"kubernetes namespace\")\n@click.option(\n 
\"--timeout\", \"-t\", default=600, show_default=True, help=\"timeout in seconds\"\n)\ndef watch(pod, namespace, timeout):\n \"\"\"Read current or previous task (pod) logs.\"\"\"\n k8s = K8sHelper(namespace)\n status = k8s.watch(pod, namespace, timeout)\n print(f\"Pod {pod} last status is: {status}\")\n\n\n@main.command(context_settings=dict(ignore_unknown_options=True))\n@click.argument(\"kind\", type=str)\n@click.argument(\"name\", type=str, default=\"\", required=False)\n@click.option(\"--selector\", \"-s\", default=\"\", help=\"label selector\")\n@click.option(\"--namespace\", \"-n\", help=\"kubernetes namespace\")\n@click.option(\"--uid\", help=\"unique ID\")\n@click.option(\"--project\", \"-p\", help=\"project name\")\n@click.option(\"--tag\", \"-t\", default=\"\", help=\"artifact/function tag\")\n@click.option(\"--db\", help=\"db path/url\")\n@click.argument(\"extra_args\", nargs=-1, type=click.UNPROCESSED)\ndef get(kind, name, selector, namespace, uid, project, tag, db, extra_args):\n \"\"\"List/get one or more object per kind/class.\"\"\"\n\n if db:\n mlconf.dbpath = db\n\n if kind.startswith(\"po\"):\n k8s = K8sHelper(namespace)\n if name:\n resp = k8s.get_pod(name, namespace)\n print(resp)\n return\n\n items = k8s.list_pods(namespace, selector)\n print(f\"{'state':10} {'started':16} {'type':8} name\")\n for i in items:\n task = i.metadata.labels.get(\"mlrun/class\", \"\")\n if task:\n name = i.metadata.name\n state = i.status.phase\n start = \"\"\n if i.status.start_time:\n start = i.status.start_time.strftime(\"%b %d %H:%M:%S\")\n print(f\"{state:10} {start:16} {task:8} {name}\")\n elif kind.startswith(\"runtime\"):\n run_db = get_run_db(db or mlconf.dbpath)\n if name:\n # the runtime identifier is its kind\n runtime = run_db.list_runtime_resources(kind=name, label_selector=selector)\n print(dict_to_yaml(runtime.dict()))\n return\n runtimes = run_db.list_runtime_resources(label_selector=selector)\n print(dict_to_yaml(runtimes.dict()))\n elif kind.startswith(\"run\"):\n run_db = get_run_db()\n if name:\n run = run_db.read_run(name, project=project)\n print(dict_to_yaml(run))\n return\n\n runs = run_db.list_runs(uid=uid, project=project, labels=selector)\n df = runs.to_df()[\n [\"name\", \"uid\", \"iter\", \"start\", \"state\", \"parameters\", \"results\"]\n ]\n # df['uid'] = df['uid'].apply(lambda x: f'..{x[-6:]}')\n df[\"start\"] = df[\"start\"].apply(time_str)\n df[\"parameters\"] = df[\"parameters\"].apply(dict_to_str)\n df[\"results\"] = df[\"results\"].apply(dict_to_str)\n print(tabulate(df, headers=\"keys\"))\n\n elif kind.startswith(\"art\"):\n run_db = get_run_db()\n artifacts = run_db.list_artifacts(\n name, project=project, tag=tag, labels=selector\n )\n df = artifacts.to_df()[\n [\"tree\", \"key\", \"iter\", \"kind\", \"path\", \"hash\", \"updated\"]\n ]\n df[\"tree\"] = df[\"tree\"].apply(lambda x: f\"..{x[-8:]}\")\n df[\"hash\"] = df[\"hash\"].apply(lambda x: f\"..{x[-6:]}\")\n print(tabulate(df, headers=\"keys\"))\n\n elif kind.startswith(\"func\"):\n run_db = get_run_db()\n if name:\n f = run_db.get_function(name, project=project, tag=tag)\n print(dict_to_yaml(f))\n return\n\n functions = run_db.list_functions(name, project=project, labels=selector)\n lines = []\n headers = [\"kind\", \"state\", \"name:tag\", \"hash\"]\n for f in functions:\n name = get_in(f, \"metadata.name\")\n tag = get_in(f, \"metadata.tag\", \"\")\n line = [\n get_in(f, \"kind\", \"\"),\n get_in(f, \"status.state\", \"\"),\n f\"{name}:{tag}\",\n get_in(f, \"metadata.hash\", \"\"),\n ]\n 
lines.append(line)\n print(tabulate(lines, headers=headers))\n else:\n print(\n \"currently only get pods | runs | artifacts | func [name] | runtime are supported\"\n )\n\n\n@main.command()\n@click.option(\"--port\", \"-p\", help=\"port to listen on\", type=int)\n@click.option(\"--dirpath\", \"-d\", help=\"database directory (dirpath)\")\ndef db(port, dirpath):\n \"\"\"Run HTTP api/database server\"\"\"\n env = environ.copy()\n if port is not None:\n env[\"MLRUN_httpdb__port\"] = str(port)\n if dirpath is not None:\n env[\"MLRUN_httpdb__dirpath\"] = dirpath\n\n cmd = [executable, \"-m\", \"mlrun.api.main\"]\n child = Popen(cmd, env=env)\n returncode = child.wait()\n if returncode != 0:\n raise SystemExit(returncode)\n\n\n@main.command()\ndef version():\n \"\"\"get mlrun version\"\"\"\n print(f\"MLRun version: {str(Version().get())}\")\n\n\n@main.command()\n@click.argument(\"uid\", type=str)\n@click.option(\"--project\", \"-p\", help=\"project name\")\n@click.option(\"--offset\", type=int, default=0, help=\"byte offset\")\n@click.option(\"--db\", help=\"api and db service path/url\")\n@click.option(\"--watch\", \"-w\", is_flag=True, help=\"watch/follow log\")\ndef logs(uid, project, offset, db, watch):\n \"\"\"Get or watch task logs\"\"\"\n mldb = get_run_db(db or mlconf.dbpath)\n if mldb.kind == \"http\":\n state = mldb.watch_log(uid, project, watch=watch, offset=offset)\n else:\n state, text = mldb.get_log(uid, project, offset=offset)\n if text:\n print(text.decode())\n\n if state:\n print(f\"final state: {state}\")\n\n\n@main.command()\n@click.argument(\"context\", default=\"\", type=str, required=False)\n@click.option(\"--name\", \"-n\", help=\"project name\")\n@click.option(\"--url\", \"-u\", help=\"remote git or archive url\")\n@click.option(\"--run\", \"-r\", help=\"run workflow name of .py file\")\n@click.option(\n \"--arguments\",\n \"-a\",\n default=\"\",\n multiple=True,\n help=\"Kubeflow pipeline arguments name and value tuples (with -r flag), e.g. -a x=6\",\n)\n@click.option(\"--artifact-path\", \"-p\", help=\"output artifacts path\")\n@click.option(\n \"--param\",\n \"-x\",\n default=\"\",\n multiple=True,\n help=\"mlrun project parameter name and value tuples, e.g. 
-p x=37 -p y='text'\",\n)\n@click.option(\n \"--secrets\", \"-s\", multiple=True, help=\"secrets file= or env=ENV_KEY1,..\"\n)\n@click.option(\"--namespace\", help=\"k8s namespace\")\n@click.option(\"--db\", help=\"api and db service path/url\")\n@click.option(\"--init-git\", is_flag=True, help=\"for new projects init git context\")\n@click.option(\n \"--clone\", \"-c\", is_flag=True, help=\"force override/clone into the context dir\"\n)\n@click.option(\"--sync\", is_flag=True, help=\"sync functions into db\")\n@click.option(\n \"--watch\", \"-w\", is_flag=True, help=\"wait for pipeline completion (with -r flag)\"\n)\n@click.option(\n \"--dirty\", \"-d\", is_flag=True, help=\"allow run with uncommitted git changes\"\n)\n@click.option(\"--git-repo\", help=\"git repo (org/repo) for git comments\")\n@click.option(\n \"--git-issue\", type=int, default=None, help=\"git issue number for git comments\"\n)\n@click.option(\"--handler\", default=None, help=\"workflow function handler name\")\n@click.option(\"--engine\", default=None, help=\"workflow engine (kfp/local)\")\n@click.option(\"--local\", is_flag=True, help=\"try to run workflow functions locally\")\ndef project(\n context,\n name,\n url,\n run,\n arguments,\n artifact_path,\n param,\n secrets,\n namespace,\n db,\n init_git,\n clone,\n sync,\n watch,\n dirty,\n git_repo,\n git_issue,\n handler,\n engine,\n local,\n):\n \"\"\"load and/or run a project\"\"\"\n if db:\n mlconf.dbpath = db\n\n proj = load_project(context, url, name, init_git=init_git, clone=clone)\n url_str = \" from \" + url if url else \"\"\n print(f\"Loading project {proj.name}{url_str} into {context}:\\n\")\n\n if artifact_path and not (\"://\" in artifact_path or artifact_path.startswith(\"/\")):\n artifact_path = path.abspath(artifact_path)\n if param:\n proj.spec.params = fill_params(param, proj.spec.params)\n if git_repo:\n proj.spec.params[\"git_repo\"] = git_repo\n if git_issue:\n proj.spec.params[\"git_issue\"] = git_issue\n commit = (\n proj.get_param(\"commit_id\")\n or environ.get(\"GITHUB_SHA\")\n or environ.get(\"CI_COMMIT_SHA\")\n )\n if commit:\n proj.spec.params[\"commit_id\"] = commit\n if secrets:\n secrets = line2keylist(secrets, \"kind\", \"source\")\n proj._secrets = SecretsStore.from_list(secrets)\n print(proj.to_yaml())\n\n if run:\n workflow_path = None\n if run.endswith(\".py\"):\n workflow_path = run\n run = None\n\n args = None\n if arguments:\n args = fill_params(arguments)\n\n print(f\"running workflow {run} file: {workflow_path}\")\n message = run_result = \"\"\n had_error = False\n gitops = (\n git_issue\n or environ.get(\"GITHUB_EVENT_PATH\")\n or environ.get(\"CI_MERGE_REQUEST_IID\")\n )\n if gitops:\n proj.notifiers.git_comment(\n git_repo, git_issue, token=proj.get_secret(\"GITHUB_TOKEN\")\n )\n try:\n run_result = proj.run(\n run,\n workflow_path,\n arguments=args,\n artifact_path=artifact_path,\n namespace=namespace,\n sync=sync,\n dirty=dirty,\n workflow_handler=handler,\n engine=engine,\n local=local,\n )\n print(f\"run id: {run_result.run_id}\")\n except Exception as exc:\n print(traceback.format_exc())\n message = f\"failed to run pipeline, {exc}\"\n had_error = True\n print(message)\n\n if had_error:\n proj.notifiers.push(message)\n if had_error:\n exit(1)\n\n if watch and run_result and run_result.workflow.engine == \"kfp\":\n proj.get_run_status(run_result)\n\n elif sync:\n print(\"saving project functions to db ..\")\n proj.sync_functions(save=True)\n\n\ndef validate_kind(ctx, param, value):\n possible_kinds = 
RuntimeKinds.runtime_with_handlers()\n if value is not None and value not in possible_kinds:\n raise click.BadParameter(\n f\"kind must be one of {possible_kinds}\", ctx=ctx, param=param\n )\n return value\n\n\n@main.command()\n@click.argument(\"kind\", callback=validate_kind, default=None, required=False)\n@click.argument(\"object_id\", metavar=\"id\", type=str, default=None, required=False)\n@click.option(\"--api\", help=\"api service url\")\n@click.option(\"--label-selector\", \"-ls\", default=\"\", help=\"label selector\")\n@click.option(\n \"--force\", \"-f\", is_flag=True, help=\"clean resources in non-terminal states as well\"\n)\n@click.option(\n \"--grace-period\",\n \"-gp\",\n type=int,\n # When someone triggers the cleanup manually we assume they want runtime resources in terminal state to be removed\n # now, therefore not using here mlconf.runtime_resources_deletion_grace_period\n default=0,\n help=\"the grace period (in seconds) that will be given to runtime resources (after they're in terminal state) \"\n \"before cleaning them. Ignored when --force is given\",\n show_default=True,\n)\ndef clean(kind, object_id, api, label_selector, force, grace_period):\n \"\"\"\n Clean jobs resources\n\n \\b\n Examples:\n\n \\b\n # Clean resources for all runs of all runtimes\n mlrun clean\n\n \\b\n # Clean resources for all runs of a specific kind (e.g. job)\n mlrun clean job\n\n \\b\n # Clean resources for specific job (by uid)\n mlrun clean mpijob 15d04c19c2194c0a8efb26ea3017254b\n \"\"\"\n mldb = get_run_db(api or mlconf.dbpath)\n mldb.delete_runtime_resources(\n kind=kind,\n object_id=object_id,\n label_selector=label_selector,\n force=force,\n grace_period=grace_period,\n )\n\n\n@main.command(name=\"watch-stream\")\n@click.argument(\"url\", type=str)\n@click.option(\n \"--shard-ids\",\n \"-s\",\n multiple=True,\n type=int,\n help=\"shard id to listen on (can be multiple)\",\n)\n@click.option(\"--seek\", help=\"where to start/seek (EARLIEST or LATEST)\")\n@click.option(\n \"--interval\",\n \"-i\",\n default=3,\n show_default=True,\n help=\"interval in seconds\",\n type=int,\n)\n@click.option(\n \"--is-json\",\n \"-j\",\n is_flag=True,\n help=\"indicate the payload is json (will be deserialized)\",\n)\ndef watch_stream(url, shard_ids, seek, interval, is_json):\n \"\"\"watch on a stream and print data every interval\"\"\"\n mlrun.platforms.watch_stream(\n url, shard_ids, seek, interval=interval, is_json=is_json\n )\n\n\n@main.command(name=\"config\")\ndef show_config():\n \"\"\"Show configuration & exit\"\"\"\n print(mlconf.dump_yaml())\n\n\ndef fill_params(params, params_dict=None):\n params_dict = params_dict or {}\n for param in params:\n i = param.find(\"=\")\n if i == -1:\n continue\n key, value = param[:i].strip(), param[i + 1 :].strip()\n if key is None:\n raise ValueError(f\"cannot find param key in line ({param})\")\n params_dict[key] = py_eval(value)\n return params_dict\n\n\ndef py_eval(data):\n try:\n value = literal_eval(data)\n return value\n except (SyntaxError, ValueError):\n return data\n\n\ndef set_item(obj, item, key, value=None):\n if item:\n if value:\n setattr(obj, key, value)\n else:\n setattr(obj, key, item)\n\n\ndef line2keylist(lines: list, keyname=\"key\", valname=\"path\"):\n out = []\n for line in lines:\n i = line.find(\"=\")\n if i == -1:\n raise ValueError(f'cannot find \"=\" in line ({keyname}={valname})')\n key, value = line[:i].strip(), line[i + 1 :].strip()\n if key is None:\n raise ValueError(f\"cannot find key in line ({keyname}={valname})\")\n value 
= path.expandvars(value)\n out += [{keyname: key, valname: value}]\n return out\n\n\ndef time_str(x):\n try:\n return x.strftime(\"%b %d %H:%M:%S\")\n except ValueError:\n return \"\"\n\n\ndef dict_to_str(struct: dict):\n if not struct:\n return []\n return \",\".join([f\"{k}={v}\" for k, v in struct.items()])\n\n\ndef func_url_to_runtime(func_url):\n try:\n if func_url.startswith(\"db://\"):\n func_url = func_url[5:]\n project_instance, name, tag, hash_key = parse_versioned_object_uri(func_url)\n run_db = get_run_db(mlconf.dbpath)\n runtime = run_db.get_function(name, project_instance, tag, hash_key)\n else:\n func_url = \"function.yaml\" if func_url == \".\" else func_url\n runtime = import_function_to_dict(func_url, {})\n except Exception as exc:\n logger.error(f\"function {func_url} not found, {exc}\")\n return None\n\n if not runtime:\n logger.error(f\"function {func_url} not found or is null\")\n return None\n\n return runtime\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jasonnIguazio/ghpages-mlrun","sub_path":"mlrun/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":32977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74530455209","text":"import socket\r\nimport cv2\r\nimport base64\r\nimport time\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\naddServer = ('127.0.0.1', 55555)\r\nsock.connect(addServer)\r\n\r\nsock.send(b'!123')\r\n\r\ncapture = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n flag, frame = capture.read()\r\n img_resize = cv2.resize(frame,(640,480))\r\n (status,img_encode) = cv2.imencode('.jpeg', img_resize)\r\n data = img_encode.tostring()\r\n\r\n print('data send ...')\r\n sock.send(b'%123|'+ base64.b64encode(data) +b'&')\r\n # time.sleep(1)\r\n frame = capture.read()\r\n","repo_name":"kengsini250/presentation","sub_path":"pytest/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71770768168","text":"import argparse\r\nimport os\r\nfrom model.casRel import CasRel\r\nfrom model.config import Config\r\nfrom model.predict import *\r\nimport json\r\n\r\n\r\nseed = 226\r\ntorch.manual_seed(seed)\r\n\r\nparser = argparse.ArgumentParser(description='Model Controller')\r\nparser.add_argument('--lr', type=float, default=1e-5, help='learning rate')\r\nparser.add_argument('--batch_size', type=int, default=8)\r\nparser.add_argument('--max_epoch', type=int, default=25)\r\nparser.add_argument('--max_len', type=int, default=300)\r\nparser.add_argument('--dataset', default='duie', type=str, help='define your own dataset names')\r\nparser.add_argument(\"--bert_name\", default='bert-chinese-wwm', type=str, help='choose pretrained bert name')\r\nparser.add_argument('--bert_dim', default=768, type=int)\r\nparser.add_argument(\"--device\", default='0')\r\nargs = parser.parse_args()\r\ncon = Config(args)\r\n\r\nif args.device != 'cpu':\r\n assert args.device.isdigit()\r\n device = torch.device('cuda:{}'.format(args.device))\r\nelse:\r\n device = torch.device('cpu')\r\n\r\nif __name__ == '__main__':\r\n model = CasRel(con).to(device)\r\n rel_vocab = load_rel(con.rel_path)\r\n\r\n # load checkpoint weight\r\n # path = \"./saved_weights/duie/epoch_25_duie_model.pkl\"\r\n # if os.path.exists(path):\r\n # print(\"-\" * 5 + \"Begin Loading Model\" + \"-\" * 5)\r\n # checkpoint = torch.load(path)\r\n # model.load_state_dict(checkpoint['model'])\r\n # print(\"-\" * 5 + \"Finish Loading!\" + \"-\" * 5)\r\n\r\n print(\"-\" * 5 + \"Begin Loading Model\" + \"-\" * 5)\r\n path = \"./saved_weights/epoch_30#30_duie_model.pkl\"\r\n model.load_state_dict(torch.load(path, ))\r\n # map_location={'cuda:1': 'cuda:0'}))\r\n # map_location='cpu'))\r\n print(\"-\" * 5 + \"Finish Loading!\" + \"-\" * 5)\r\n\r\n # predict\r\n model.eval()\r\n while 1:\r\n text = input(\"enter a sentence: \")\r\n tokens = get_tokenized(con, text)\r\n pred_dict = predictor(tokens, rel_vocab, model)\r\n pred_result = json.dumps(pred_dict, ensure_ascii=False, indent=4)\r\n print(\"predicated SRO:\\n\", str(pred_result))\r\n","repo_name":"Belalaris/CasRel","sub_path":"Prediction.py","file_name":"Prediction.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"70096166568","text":"# imports\r\nimport os\r\nimport csv\r\nimport random\r\nimport pygame\r\nfrom datetime import datetime\r\nfrom itertools import zip_longest\r\nfrom collections import OrderedDict\r\n\r\n# Settings class\r\nclass Settings():\r\n\r\n # class attributes\r\n radius = 60 # radius of cricle\r\n lineLength = 40 # line length of fixcross\r\n lineWidth = 5 # line width of fixcross\r\n FPS = 60 # frames per second\r\n bgColor = (180, 180, 180) # bg is light grey\r\n blackColor = (0, 0, 0) # text is black\r\n redColor = (255, 0, 0) # red color\r\n greenColor = (0, 255, 0) # blue color\r\n screenSize = (1200, 800) # set screen size\r\n\r\n # results dict\r\n results = OrderedDict([(\"id\", []),\r\n (\"age\", []),\r\n (\"gender\", []),\r\n (\"major\", []),\r\n (\"stimlist\", []),\r\n (\"responses\", []),\r\n (\"rts\", []),\r\n ])\r\n\r\n # instance attributes\r\n def __init__(self):\r\n\r\n # init experiment and pygame\r\n self.init_pygame()\r\n self.init_experiment()\r\n\r\n # variable instance placeholders\r\n self.stimcolor = None # placeholder for stimulus color\r\n self.verPoints = None # placeholder for vert. points of fixcross\r\n self.horPoints = None # placeholder for hor. points of fixcross\r\n self.response = None # variable holding temporary response\r\n\r\n # attributes that get filled\r\n self.instWidth = self.screenSize[0] - (self.screenSize[0] // 10)\r\n self.instHeight = self.screenSize[1] - (self.screenSize[1] // 10)\r\n self.continueVal = 0 # boolean value to control continue events\r\n self.starter = 0 # boolean value to control task start events\r\n self.quit = 0 # boolean value to control closing experiment at end\r\n self.filename = self.get_filename()\r\n self.instPath = self.create_filepath(\"instructions\")\r\n self.stimuliPath = self.create_filepath(\"stimuli\")\r\n self.dataPath = self.create_filepath(\"data\")\r\n\r\n # stimuli loading\r\n self.load_stimuli()\r\n\r\n # load load instructions\r\n self.inst_welcome = self.load_instructions(\"welcome.txt\")\r\n self.inst_intro1 = self.load_instructions(\"intro1.txt\")\r\n self.inst_intro2 = self.load_instructions(\"intro2.txt\")\r\n self.inst_startTask = self.load_instructions(\"starttask.txt\")\r\n self.inst_endTask = self.load_instructions(\"endtask.txt\")\r\n self.inst_goodbye = self.load_instructions(\"goodbye.txt\")\r\n\r\n\r\n # === define helper functions that are called inside run_experiment() === #\r\n def demographics_input(self):\r\n \"\"\"Asks for participant demographics.\"\"\"\r\n\r\n self.results[\"id\"].append(input(\"Please enter an ID: \"))\r\n self.results[\"age\"].append(input(\"Please enter your age: \"))\r\n self.results[\"gender\"].append(input(\"Please enter your gender (m/f/other): \"))\r\n self.results[\"major\"].append(input(\"Please enter your major: \"))\r\n\r\n def init_pygame(self):\r\n \"\"\"\r\n initializes pygame explicitly.\r\n \"\"\"\r\n # initialize pygame modules\r\n pygame.init()\r\n pygame.mouse.set_visible(False) # disable mouse\r\n\r\n # set frame rate\r\n clock = pygame.time.Clock()\r\n clock.tick(self.FPS)\r\n\r\n\r\n def init_experiment(self):\r\n \"\"\"\r\n initializes experiment pygame settings explicitly with\r\n predefined params.\r\n \"\"\"\r\n\r\n # get demographics\r\n self.demographics_input()\r\n # define screen settings\r\n self.screen = pygame.display.set_mode(self.screenSize, pygame.FULLSCREEN)\r\n # get screen rect and set font\r\n self.instFont = pygame.font.SysFont(\"Arial\", 30)\r\n self.screenRect = self.screen.get_rect()\r\n # set 
circle position\r\n self.circlePos = (self.screenRect.centerx, self.screenRect.centery)\r\n\r\n def create_filepath(self, appended_text_to_abs_path):\r\n \"\"\" get os path and append to it custom directory.\"\"\"\r\n\r\n absPath = os.path.abspath(os.curdir)\r\n finalPath = os.path.join(absPath, appended_text_to_abs_path)\r\n return finalPath\r\n\r\n def get_filename(self):\r\n res_string = self.results[\"id\"][0] + \\\r\n \"_stroop_data_\" + \\\r\n datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + \\\r\n '.csv'\r\n return res_string\r\n\r\n def load_stimuli(self):\r\n \"\"\"loads stimuli lists.\"\"\"\r\n\r\n # load items (column 0)\r\n self.results[\"stimlist\"] = self.get_items(\"stimuli.csv\", column=0)\r\n\r\n\r\n def get_items(self, filename, column):\r\n \"\"\"\r\n loads items from a csv file and returns a randomly shuffled list\r\n that will serve as the stimuli list.\r\n arg1: filename\r\n arg2: column to read from\r\n return: shuffled list items\r\n \"\"\"\r\n\r\n # opens the file\r\n with open(os.path.join(self.stimuliPath, filename), 'r', newline = \"\") as csvfile:\r\n # define reader\r\n reader = csv.reader(csvfile, delimiter=';')\r\n # initialize local empty list\r\n items = []\r\n # iterate over rows of sepcified column\r\n for row in reader:\r\n item = row[column]\r\n items.append(item) # append to local list\r\n # shuffle list\r\n random.shuffle(items)\r\n # return shuffled list\r\n return items\r\n\r\n def load_instructions(self, filename):\r\n \"\"\"\r\n loads instructions from a text file.\r\n arg: name of file\r\n returns: content of file\r\n \"\"\"\r\n\r\n # open file\r\n with open(os.path.join(self.instPath, filename), 'r') as file:\r\n infile = file.read()\r\n # return content as string\r\n return infile\r\n\r\n def save_results(self, filename, resultsdict):\r\n \"\"\"\r\n saves results to a csv file.\r\n arg1: filename\r\n arg2: dictionary holding resultsdict\r\n \"\"\"\r\n # open data file\r\n with open(os.path.join(self.dataPath ,filename), 'w', newline=\"\") as file:\r\n # create csv writer\r\n w = csv.writer(file, delimiter=';')\r\n # write first row (variable labels)\r\n w.writerow(resultsdict.keys())\r\n # write data row wise\r\n w.writerows(zip_longest(*resultsdict.values()))\r\n","repo_name":"imarevic/psy_python_course","sub_path":"labsolutions/Lab10_gonogo_solution/oop_config.py","file_name":"oop_config.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"}
+{"seq_id":"27959791383","text":"\"\"\"Calculates the credit card balance after one year if\r\na person only pays the minimum monthly payment required by the credit card company each month.\r\n\"\"\"\r\n\r\nfor i in range(12):\r\n payment = balance * monthlyPaymentRate\r\n unpaid_balance = balance - payment\r\n balance = unpaid_balance + ((annualInterestRate / 12.0) * unpaid_balance)\r\n # The last iteration as the 12th month\r\n if i == 11:\r\n print(\"Remaining balance:\", round(balance, 2))","repo_name":"rivea0/mit-6.00.1x","sub_path":"problem_sets/pset2/pset2.1/pset2-1.py","file_name":"pset2-1.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"22867177529","text":"\n#LeetCode 463. Island Perimeter\n\ndef islandPerimeter(grid: [[int]]) -> int:\n\n if len(grid) == 0:\n return 0\n \n perim = 0\n\n #--count bit=flips, row by row\n for i in range(len(grid)):\n cur_bit_val = 0\n for j in range(len(grid[i])):\n if grid[i][j] != cur_bit_val:\n cur_bit_val = grid[i][j]\n perim += 1\n\n #---if you hit the boarder and you're on a 1, increment perim\n if j == len(grid[i]) - 1 and cur_bit_val == 1:\n perim += 1\n\n #---count bit-flips, column by column\n for j in range(len(grid[0])):\n cur_bit_val = 0\n for i in range(len(grid)):\n if grid[i][j] != cur_bit_val:\n cur_bit_val = grid[i][j]\n perim += 1\n \n #---if you hit the boarder and you're on a 1, increment perim\n if i == len(grid) - 1 and cur_bit_val == 1:\n perim +=1\n \n return perim\n\n# -- This is a grid DFS\n#\ndef getRegionSize(grid: [[int]], row: int, col: int):\n \n if row < 0 or col < 0 or row >= len(grid) or col >= len(grid[row]):\n return 0\n if grid[row][col] == 0:\n return 0\n \n grid[row][col] = 0\n sz = 1\n\n for r in range(row-1,row+2):\n for c in range(col-1,col+2):\n if row != r or col != c:\n sz += getRegionSize(grid, r, c)\n return sz\n\ng = [[0,1,0,0,0],\n [1,1,1,1,1],\n [0,1,0,0,0],\n [1,1,0,0,0]]\n\nprint(islandPerimeter(g))","repo_name":"jcravener/PythonWorkroom","sub_path":"IslandPerimeter.py","file_name":"IslandPerimeter.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24849505740","text":"# coding: utf-8\nimport gc\nimport logging\nimport os\nimport sys\nimport time\nimport pytest\n\nimport ray.cluster_utils\nfrom ray._private.test_utils import (\n wait_for_pid_to_exit,\n client_test_enabled,\n run_string_as_driver,\n)\n\nimport ray\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_background_tasks_with_max_calls(shutdown_only):\n ray.init(\n # TODO (Alex): We need to fix\n # https://github.com/ray-project/ray/issues/20203 to remove this flag.\n num_cpus=2,\n _system_config={\"worker_cap_initial_backoff_delay_ms\": 0},\n )\n\n num_tasks = 3 if sys.platform == \"win32\" else 10\n\n @ray.remote\n def g():\n time.sleep(0.1)\n return 0\n\n @ray.remote(max_calls=1, max_retries=0)\n def f():\n return [g.remote()]\n\n nested = ray.get([f.remote() for _ in range(num_tasks)])\n\n # Should still be able to retrieve these objects, since f's workers will\n # wait for g to finish before exiting.\n ray.get([x[0] for x in nested])\n\n @ray.remote(max_calls=1, max_retries=0)\n def f():\n return os.getpid(), g.remote()\n\n nested = ray.get([f.remote() for _ in range(num_tasks)])\n while nested:\n pid, g_id = nested.pop(0)\n assert ray.get(g_id) == 0\n del g_id\n # Necessary to dereference the object via GC, so the worker can exit.\n gc.collect()\n wait_for_pid_to_exit(pid)\n\n\ndef test_actor_killing(shutdown_only):\n # This is to test create and kill an actor immediately\n import ray\n\n ray.init(num_cpus=1)\n\n @ray.remote(num_cpus=1)\n class Actor:\n def foo(self):\n return None\n\n worker_1 = Actor.remote()\n ray.kill(worker_1)\n worker_2 = Actor.remote()\n assert ray.get(worker_2.foo.remote()) is None\n ray.kill(worker_2)\n\n worker_1 = Actor.options(max_restarts=1).remote()\n ray.kill(worker_1, no_restart=False)\n assert ray.get(worker_1.foo.remote()) is None\n\n ray.kill(worker_1, no_restart=False)\n worker_2 = Actor.remote()\n assert ray.get(worker_2.foo.remote()) is None\n\n\n@pytest.mark.skipif(\n client_test_enabled(), reason=\"client api doesn't support namespace right now.\"\n)\ndef test_internal_kv(ray_start_regular):\n import ray.experimental.internal_kv as kv\n\n assert kv._internal_kv_get(\"k1\") is None\n assert kv._internal_kv_put(\"k1\", \"v1\") is False\n assert kv._internal_kv_put(\"k1\", \"v1\") is True\n assert kv._internal_kv_get(\"k1\") == b\"v1\"\n\n assert kv._internal_kv_get(\"k1\", namespace=\"n\") is None\n assert kv._internal_kv_put(\"k1\", \"v1\", namespace=\"n\") is False\n assert kv._internal_kv_put(\"k1\", \"v1\", namespace=\"n\") is True\n assert kv._internal_kv_put(\"k1\", \"v2\", True, namespace=\"n\") is True\n assert kv._internal_kv_get(\"k1\", namespace=\"n\") == b\"v2\"\n\n assert kv._internal_kv_del(\"k1\") == 1\n assert kv._internal_kv_del(\"k1\") == 0\n assert kv._internal_kv_get(\"k1\") is None\n\n assert kv._internal_kv_put(\"k2\", \"v2\", namespace=\"n\") is False\n assert kv._internal_kv_put(\"k3\", \"v3\", namespace=\"n\") is False\n\n assert set(kv._internal_kv_list(\"k\", namespace=\"n\")) == {b\"k1\", b\"k2\", b\"k3\"}\n assert kv._internal_kv_del(\"k\", del_by_prefix=True, namespace=\"n\") == 3\n assert kv._internal_kv_del(\"x\", del_by_prefix=True, namespace=\"n\") == 0\n assert kv._internal_kv_get(\"k1\", namespace=\"n\") is None\n assert kv._internal_kv_get(\"k2\", namespace=\"n\") is None\n assert kv._internal_kv_get(\"k3\", namespace=\"n\") is None\n\n with pytest.raises(RuntimeError):\n kv._internal_kv_put(\"@namespace_\", \"x\", True)\n with pytest.raises(RuntimeError):\n 
kv._internal_kv_get(\"@namespace_\", namespace=\"n\")\n with pytest.raises(RuntimeError):\n kv._internal_kv_del(\"@namespace_def\", namespace=\"n\")\n with pytest.raises(RuntimeError):\n kv._internal_kv_list(\"@namespace_abc\", namespace=\"n\")\n\n\ndef test_run_on_all_workers(ray_start_regular):\n # This test is to ensure run_function_on_all_workers are executed\n # on all workers.\n @ray.remote\n class Actor:\n def __init__(self):\n self.jobs = []\n\n def record(self, job_id=None):\n if job_id is not None:\n self.jobs.append(job_id)\n return self.jobs\n\n a = Actor.options(name=\"recorder\", namespace=\"n\").remote() # noqa: F841\n driver_script = \"\"\"\nimport ray\nfrom pathlib import Path\n\ndef init_func(worker_info):\n a = ray.get_actor(\"recorder\", namespace=\"n\")\n a.record.remote(worker_info['worker'].worker_id)\n\nray.worker.global_worker.run_function_on_all_workers(init_func)\nray.init(address='auto')\n@ray.remote\ndef ready():\n a = ray.get_actor(\"recorder\", namespace=\"n\")\n assert ray.worker.global_worker.worker_id in ray.get(a.record.remote())\n\nray.get(ready.remote())\n\"\"\"\n run_string_as_driver(driver_script)\n run_string_as_driver(driver_script)\n run_string_as_driver(driver_script)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([\"-v\", __file__]))\n","repo_name":"santosh-shetkar-katonic/ray-cluster","sub_path":"python/ray/tests/test_basic_5.py","file_name":"test_basic_5.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10109268798","text":"import os\nimport unittest\n\nfrom pxr import Sdf\nfrom pxr import Usd\nfrom pxr import UsdGeom\nfrom pxr import Vt\nfrom pxr import Gf\n\nfrom maya import cmds\nfrom maya import standalone\n\n\nclass testUsdExportDisplayColor(unittest.TestCase):\n\n def _AssertPrimvar(self, primvar, expectedTypeName=None,\n expectedValues=None, expectedInterpolation=None,\n expectedIndices=None, expectedUnauthoredValuesIndex=None):\n self.assertTrue(primvar)\n\n if expectedInterpolation is None:\n expectedInterpolation = UsdGeom.Tokens.constant\n if expectedIndices is None:\n expectedIndices = Vt.IntArray()\n elif isinstance(expectedIndices, list):\n expectedIndices = Vt.IntArray(expectedIndices)\n if expectedUnauthoredValuesIndex is None:\n expectedUnauthoredValuesIndex = -1\n\n # This should work for undefined primvars.\n self.assertEqual(primvar.GetIndices(), expectedIndices)\n self.assertEqual(primvar.GetUnauthoredValuesIndex(),\n expectedUnauthoredValuesIndex)\n\n if expectedTypeName is None:\n self.assertFalse(primvar.IsDefined())\n # No further testing for primvars that we expect not to exist.\n return\n\n self.assertTrue(primvar.IsDefined())\n self.assertEqual(primvar.GetTypeName(), expectedTypeName)\n self.assertEqual(primvar.GetInterpolation(), expectedInterpolation)\n\n if expectedValues is None:\n self.assertFalse(primvar.GetAttr().HasAuthoredValueOpinion())\n self.assertEqual(primvar.Get(), None)\n else:\n for idx in range(len(primvar.Get())):\n val1 = primvar.Get()[idx]\n val2 = expectedValues[idx]\n if isinstance(val1, Gf.Vec3f):\n self.assertEqual(val1, val2) # both are 3-vectors\n continue\n self.assertAlmostEqual(val1, val2, places=5) # default==7\n\n def _AssertMeshDisplayColorAndOpacity(self, mesh, expectedColors=None,\n expectedOpacities=None, expectedInterpolation=None,\n expectedIndices=None, expectedUnauthoredValuesIndex=None):\n displayColorPrimvar = mesh.GetDisplayColorPrimvar()\n self._AssertPrimvar(\n displayColorPrimvar, Sdf.ValueTypeNames.Color3fArray,\n expectedColors, expectedInterpolation,\n expectedIndices, expectedUnauthoredValuesIndex)\n\n displayOpacityPrimvar = mesh.GetDisplayOpacityPrimvar()\n self._AssertPrimvar(\n displayOpacityPrimvar, Sdf.ValueTypeNames.FloatArray,\n expectedOpacities, expectedInterpolation,\n expectedIndices, expectedUnauthoredValuesIndex)\n\n\n @classmethod\n def tearDownClass(cls):\n standalone.uninitialize()\n\n @classmethod\n def setUpClass(cls):\n standalone.initialize('usd')\n cmds.file(os.path.abspath('UsdExportDisplayColorTest.ma'),\n open=True, force=True)\n\n # Export to USD.\n usdFilePath = os.path.abspath('UsdExportDisplayColorTest.usda')\n cmds.loadPlugin('pxrUsd')\n cmds.usdExport(mergeTransformAndShape=True,\n file=usdFilePath,\n shadingMode='none',\n exportDisplayColor=True)\n\n cls._stage = Usd.Stage.Open(usdFilePath)\n\n def testStageOpens(self):\n self.assertTrue(self._stage)\n\n def _GetCubeMesh(self, cubeName):\n cubePrimPath = '/UsdExportDisplayColorTest/Geom/CubeMeshes/%s' % cubeName\n cubePrim = self._stage.GetPrimAtPath(cubePrimPath)\n self.assertTrue(cubePrim)\n\n usdMesh = UsdGeom.Mesh(cubePrim)\n self.assertTrue(usdMesh)\n\n return usdMesh\n\n def testExportPolyCubeObjectLevelAssignment(self):\n \"\"\"\n Tests exporting a cube where the entire object is assigned one shader.\n \"\"\"\n cubeMesh = self._GetCubeMesh('ObjectLevelCube')\n\n expectedColors = Vt.Vec3fArray([(1.0, 1.0, 0.0)])\n expectedOpacities = Vt.FloatArray([0.4])\n self._AssertMeshDisplayColorAndOpacity(cubeMesh, 
expectedColors,\n expectedOpacities, UsdGeom.Tokens.constant)\n\n def testExportPolyCubeUniquePerFace(self):\n \"\"\"\n Tests exporting a cube where each face is assigned a unique shader.\n \"\"\"\n cubeMesh = self._GetCubeMesh('UniquePerFaceCube')\n\n expectedColors = Vt.Vec3fArray([\n (0.0, 1.0, 1.0),\n (1.0, 0.0, 0.0),\n (0.0, 1.0, 0.0),\n (0.0, 0.0, 1.0),\n (1.0, 1.0, 0.0),\n (1.0, 0.0, 1.0)])\n expectedOpacities = Vt.FloatArray([\n 0.1,\n 0.85,\n 0.7,\n 0.55,\n 0.4,\n 0.25])\n expectedIndices = Vt.IntArray([5, 3, 0, 1, 2, 4])\n self._AssertMeshDisplayColorAndOpacity(cubeMesh, expectedColors,\n expectedOpacities, UsdGeom.Tokens.uniform, expectedIndices)\n\n def testExportPolyCubeSharedFaces(self):\n \"\"\"\n Tests exporting a cube where each of three pairs of faces have the\n same shader assigned.\n \"\"\"\n cubeMesh = self._GetCubeMesh('SharedFacesCube')\n\n expectedColors = Vt.Vec3fArray([\n (1.0, 0.0, 1.0),\n (1.0, 0.0, 0.0),\n (0.0, 0.0, 1.0)])\n expectedOpacities = Vt.FloatArray([\n 0.25,\n 0.85,\n 0.55])\n expectedIndices = Vt.IntArray([0, 2, 0, 1, 2, 1])\n self._AssertMeshDisplayColorAndOpacity(cubeMesh, expectedColors,\n expectedOpacities, UsdGeom.Tokens.uniform, expectedIndices)\n\n def testExportPolyCubeUnassigned(self):\n \"\"\"\n Tests exporting a cube that has no shader assigned at all.\n \"\"\"\n cubeMesh = self._GetCubeMesh('UnassignedCube')\n\n self._AssertMeshDisplayColorAndOpacity(cubeMesh)\n\n def testExportPolyCubeOneAssignedFace(self):\n \"\"\"\n Tests exporting a cube that has no object-level shader assigned and\n only one face that has an assigned shader.\n \"\"\"\n cubeMesh = self._GetCubeMesh('OneFaceCube')\n\n expectedColors = Vt.Vec3fArray([\n (0.0, 1.0, 0.0),\n (0.5, 0.5, 0.5)])\n expectedOpacities = Vt.FloatArray([\n 0.7,\n 0.0])\n\n expectedIndices = Vt.IntArray([0, 1, 1, 1, 1, 1])\n expectedUnauthoredValuesIndex = 1\n self._AssertMeshDisplayColorAndOpacity(cubeMesh, expectedColors,\n expectedOpacities, UsdGeom.Tokens.uniform,\n expectedIndices, expectedUnauthoredValuesIndex)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"JosephTom/PixarUSD","sub_path":"third_party/maya/lib/usdMaya/testenv/testUsdExportDisplayColor.py","file_name":"testUsdExportDisplayColor.py","file_ext":"py","file_size_in_byte":6630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"43490261655","text":"#base class yani temel sınıf ve \n#parent class \n\n\n#child class bu classlar temel sınıflardan oluşur.\n\nfrom _typeshed import Self\n\n\nclass Person:\n def __init__(self,name,surname,age) -> None:\n self.name = name\n self.surname = surname\n self.age = age\n print(\"Person Nesnesi Türetildi.\")\n\n# class intro(self):\n# print(self.name,self.surname,self.age)\n\nclass Student(Person):\n def __init__(self,name,surname,age,number):\n Person.__init__(self,name,surname,age)\n self.number = number\n print(\"Student nesnesi üretildi\")\n \n \n \n\nclass Teacher(Person):\n def __init__(self, name, surname, age,branch) -> None:\n Person.__init__(name, surname, age)\n self.branch = branch\n print(\"Teacher Nesnesi Üretildi.\")\n \n \n\n# t1 = Teacher()\np1 = Person(\"Sefa\",\"Pınar\",40)\n# s1 = Student()\n\nprint(p1.name,p1.surname,p1.age)","repo_name":"sefaapinar/Python_Advanced_Training","sub_path":"Intheritance(Kalıtım)/child-genişletmek.py","file_name":"child-genişletmek.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"12055443449","text":"from pathlib import Path\n\nfrom golem.core.optimisers.genetic.gp_params import GPAlgorithmParameters\nfrom golem.core.optimisers.optimization_parameters import GraphRequirements\nfrom golem.core.optimisers.optimizer import GraphGenerationParams\n\nfrom gefest.core.configs.utils import load_config\nfrom gefest.core.opt.adapters.configuration_mapping import (\n map_into_gpa,\n map_into_graph_generation_params,\n map_into_graph_requirements,\n)\n\nfilepath = Path(__file__)\ntest_config = load_config(str(filepath.parent) + '/test_config.py')\n\n\ndef test_mapping_into_graph_requirements():\n \"\"\"Test OptimizationParams translation into GraphRequirements.\"\"\"\n graph_requirements = map_into_graph_requirements(test_config)\n assert isinstance(graph_requirements, GraphRequirements)\n\n\ndef test_mapping_into_graph_generation_params():\n \"\"\"Test OptimizationParams translation into GraphGenerationParams.\"\"\"\n graph_generation_params = map_into_graph_generation_params(test_config)\n assert isinstance(graph_generation_params, GraphGenerationParams)\n\n\ndef test_mapping_into_gpa():\n \"\"\"Test OptimizationParams translation into GPAlgorithmParameters.\"\"\"\n gpa = map_into_gpa(test_config)\n assert isinstance(gpa, GPAlgorithmParameters)\n","repo_name":"aimclub/GEFEST","sub_path":"test/test_adapters.py","file_name":"test_adapters.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"53"}
+{"seq_id":"39586676116","text":"import tensorflow as tf\nfrom ddpg.actor import Actor\nfrom ddpg.critic import Critic\nfrom ddpg.replay_buffer import RBuffer\n\n\nclass Agent():\n def __init__(self, env, hparams):\n n_action = len(env.action_space.high)\n self.actor_main = Actor(n_action, hparams)\n self.actor_target = Actor(n_action, hparams)\n self.critic_main = Critic(hparams)\n self.critic_target = Critic(hparams)\n self.batch_size = 64\n self.n_actions = len(env.action_space.high)\n self.a_opt = tf.keras.optimizers.Adam(hparams['lr'])\n # self.actor_target = tf.keras.optimizers.Adam(.001)\n self.c_opt = tf.keras.optimizers.Adam(hparams['lr'])\n # self.critic_target = tf.keras.optimizers.Adam(.002)\n self.memory = RBuffer(1_00_000, env.observation_space.shape, len(env.action_space.high))\n self.trainstep = 0\n self.replace = 5\n self.gamma = 0.99\n self.min_action = env.action_space.low[0]\n self.max_action = env.action_space.high[0]\n\n def act(self, state, evaluate=False):\n state = tf.convert_to_tensor([state], dtype=tf.float32)\n actions = self.actor_main(state)\n if not evaluate:\n actions += tf.random.normal(shape=[self.n_actions], mean=0.0, stddev=0.1)\n\n actions = self.max_action * (tf.clip_by_value(actions, self.min_action, self.max_action))\n # print(actions)\n return actions[0]\n\n def savexp(self, state, next_state, action, done, reward):\n self.memory.storexp(state, next_state, action, done, reward)\n\n def update_target(self):\n self.actor_target.set_weights(self.actor_main.get_weights())\n self.critic_target.set_weights(self.critic_main.get_weights())\n\n def train(self):\n if self.memory.cnt < self.batch_size:\n return\n\n states, next_states, rewards, actions, dones = self.memory.sample(self.batch_size)\n\n states = tf.convert_to_tensor(states, dtype=tf.float32)\n next_states = tf.convert_to_tensor(next_states, dtype=tf.float32)\n rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)\n actions = tf.convert_to_tensor(actions, dtype=tf.float32)\n # dones = tf.convert_to_tensor(dones, dtype= tf.bool)\n\n with tf.GradientTape() as tape1, tf.GradientTape() as tape2:\n\n target_actions = self.actor_target(next_states)\n target_next_state_values = tf.squeeze(self.critic_target(next_states, target_actions), 1)\n critic_value = tf.squeeze(self.critic_main(states, actions), 1)\n target_values = rewards + self.gamma * target_next_state_values * dones\n critic_loss = tf.keras.losses.MSE(target_values, critic_value)\n\n new_policy_actions = self.actor_main(states)\n actor_loss = -self.critic_main(states, new_policy_actions)\n actor_loss = tf.math.reduce_mean(actor_loss)\n\n grads1 = tape1.gradient(actor_loss, self.actor_main.trainable_variables)\n grads2 = tape2.gradient(critic_loss, self.critic_main.trainable_variables)\n self.a_opt.apply_gradients(zip(grads1, self.actor_main.trainable_variables))\n self.c_opt.apply_gradients(zip(grads2, self.critic_main.trainable_variables))\n\n if self.trainstep % self.replace == 0:\n self.update_target()\n\n self.trainstep += 1\n","repo_name":"PawelMlyniec/Walker-2D","sub_path":"ddpg/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35116232474","text":"\"\"\" File: obfuscation/gui.py\nDefines generic widget generation functions in addition to subclasses \nof the obfuscation transformation unit classes that provide support \nfor graphical user interface operation, through the addition of `get_gui`, \n`edit_gui` and `load_gui_values` methods. This allows these\ntransformations to be instantiated, edited and used through the GUI.\n\"\"\"\nfrom ..config import GuiDefaults as Df\nfrom . import *\nfrom PyQt6.QtWidgets import *\nfrom PyQt6.QtGui import *\nfrom PyQt6.QtCore import Qt\nfrom typing import Tuple, Mapping, Any\n\n\ndef set_no_options_widget(parent: QWidget) -> None:\n \"\"\"Creates the transformation options menu for any obfuscation transformation\n that does not have any options, simply displaying a small centred label with the\n text \"No options available.\".\n\n Args:\n parent (QWidget): The parent widget to put this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n no_options_label = QLabel(\"No options available.\", parent)\n no_options_label.setFont(QFont(Df.DEFAULT_FONT, 12))\n no_options_label.setStyleSheet(\"QLabel{color: #727463;}\")\n layout.addWidget(no_options_label, alignment=Qt.AlignmentFlag.AlignCenter)\n parent.setLayout(layout)\n\n\ndef generate_integer_widget(\n label_msg: str,\n tooltip_msg: str,\n init_val: int,\n min_val: int,\n max_val: int,\n parent: QWidget,\n) -> Tuple[QWidget, QLineEdit]:\n \"\"\"Creates a GUI form for transformation option integer entry, displaying a\n left-justified label next to a right-justified entry that only accepts valid\n integer value inputs.\n\n Args:\n label_msg (str): The text message to label the integer entry with.\n tooltip_msg (str): The text message to display as a tooltip on the label.\n init_val (int): The initial integer value used to occupy the entry.\n min_val (int): The minimum allowed integer value to enter into the entry.\n max_val (int): The maximum allowed integer value to enter into the entry.\n parent (QWidget): The parent widget that this widget will be placed in.\n\n Returns:\n Tuple[QWidget, QLineEdit]: Returns a tuple, where the first item is the\n created widget, and the second is the exact entry UI element to allow\n you to access a user-entered value.\n \"\"\"\n integer_widget = QWidget(parent)\n layout = QHBoxLayout(integer_widget)\n layout.setContentsMargins(0, 0, 0, 0)\n label = QLabel(label_msg, integer_widget)\n label.setFont(QFont(Df.DEFAULT_FONT, 12))\n label.setToolTip(tooltip_msg)\n QToolTip.setFont(QFont(Df.DEFAULT_FONT, 13))\n label.setStyleSheet(\"QLabel{color: #727463;}\\n\" + Df.GENERAL_TOOLTIP_CSS)\n layout.addWidget(label, alignment=Qt.AlignmentFlag.AlignLeft)\n layout.addStretch()\n entry = QLineEdit(str(init_val), integer_widget)\n entry.setFont(QFont(Df.DEFAULT_FONT, 12))\n entry.setValidator(QIntValidator(min_val, max_val, entry))\n entry.setStyleSheet(\n \"\"\"\n QLineEdit{\n background-color: #161613;\n border: solid;\n border-width: 3px;\n border-color: #161613;\n color: #727463;\n }\"\"\"\n )\n layout.addWidget(entry, alignment=Qt.AlignmentFlag.AlignRight)\n integer_widget.setLayout(layout)\n return (integer_widget, entry)\n\n\ndef generate_float_widget(\n label_msg: str,\n tooltip_msg: str,\n init_val: float,\n min_val: float,\n max_val: float,\n parent: QWidget,\n) -> Tuple[QWidget, QLineEdit]:\n \"\"\"Creates a GUI form for transformation option float entry, displaying a\n left-justified label next to a right-justified entry that only accepts valid\n float value inputs.\n\n Args:\n 
label_msg (str): The text message to label the float entry with.\n tooltip_msg (str): The text message to display as a tooltip on the label.\n init_val (float): The initial float value used to occupy the entry.\n min_val (float): The minimum allowed float value to enter into the entry.\n max_val (float): The maximum allowed float value to enter into the entry.\n parent (QWidget): The parent widget that this widget will be placed in.\n\n Returns:\n Tuple[QWidget, QLineEdit]: Returns a tuple, where the first item is the\n created widget, and the second is the exact entry UI element to allow\n you to access a user-entered value.\n \"\"\"\n float_widget = QWidget(parent)\n layout = QHBoxLayout(float_widget)\n layout.setContentsMargins(0, 0, 0, 0)\n label = QLabel(label_msg, float_widget)\n label.setFont(QFont(Df.DEFAULT_FONT, 12))\n label.setToolTip(tooltip_msg)\n QToolTip.setFont(QFont(Df.DEFAULT_FONT, 13))\n label.setStyleSheet(\"QLabel{color: #727463;}\\n\" + Df.GENERAL_TOOLTIP_CSS)\n layout.addWidget(label, alignment=Qt.AlignmentFlag.AlignLeft)\n layout.addStretch()\n entry = QLineEdit(str(init_val), float_widget)\n entry.setFont(QFont(Df.DEFAULT_FONT, 12))\n entry.setValidator(QDoubleValidator(min_val, max_val, 1000, entry))\n entry.setStyleSheet(\n \"\"\"\n QLineEdit{\n background-color: #161613;\n border: solid;\n border-width: 3px;\n border-color: #161613;\n color: #727463;\n }\"\"\"\n )\n layout.addWidget(entry, alignment=Qt.AlignmentFlag.AlignRight)\n float_widget.setLayout(layout)\n return (float_widget, entry)\n\n\ndef generate_radio_button_widget(\n label_msg: str,\n tooltip_msg: str,\n options: Mapping[str, Any],\n init_val: str,\n parent: QWidget,\n option_tooltips: Optional[Mapping[str, str]] = None,\n) -> Tuple[QWidget, dict[QRadioButton, Any]]:\n \"\"\"Creates a GUI form for radio button UI elements that allow the mutually\n exclusive selection of an option from some set of options. 
A name label is\n displayed above the set of optiosn, and each individual radio button has\n its own option label describing what the option is.\n\n Args:\n label_msg (str): The text message to label the entire selection with.\n tooltip_msg (str): The text message to display as a tooltip on the top label.\n options (Mapping[str, Any]): The set of possible radio button options, mapping\n their string label to some value that they should be used to represent.\n init_val (str): The initial string name of the option that should be selected.\n parent (QWidget): The parent widget that this widget will be placed in.\n\n Returns:\n Tuple[QWidget, dict[QRadioButton, Any]]: Returns a tuple, where the first item\n is the created widget, and the second is a dictionary of created radio buttons \n which can be checked to determine the user input value.\n \"\"\"\n if option_tooltips is None:\n option_tooltips = {}\n radio_widget = QWidget(parent)\n layout = QVBoxLayout(radio_widget)\n layout.setSpacing(0)\n layout.setContentsMargins(0, 0, 0, 0)\n label = QLabel(label_msg, radio_widget)\n label.setFont(QFont(Df.DEFAULT_FONT, 12))\n label.setToolTip(tooltip_msg)\n QToolTip.setFont(QFont(Df.DEFAULT_FONT, 13))\n label.setStyleSheet(\"QLabel{color: #727463;}\\n\" + Df.GENERAL_TOOLTIP_CSS)\n layout.addWidget(label, alignment=Qt.AlignmentFlag.AlignLeft)\n button_widget = QWidget(radio_widget)\n button_layout = QVBoxLayout(button_widget)\n button_layout.setContentsMargins(15, 5, 5, 5)\n radio_buttons = {}\n for option in options.keys():\n radio_button = QRadioButton(option, radio_widget)\n if option == init_val:\n radio_button.setChecked(True)\n if option in option_tooltips:\n radio_button.setToolTip(option_tooltips[option])\n radio_button.setFont(QFont(Df.DEFAULT_FONT, 11))\n radio_button.setStyleSheet(\n Df.GENERAL_TOOLTIP_CSS\n + \"\"\"\n QRadioButton{\n color: #727463;\n }\n QRadioButton::indicator{\n width: 10px;\n height: 10px;\n border-radius: 7px;\n }\n QRadioButton::indicator::checked{\n background-color: none;\n border: 2px solid white;\n }\n QRadioButton::indicator::unchecked{\n background-color: white;\n border: 2px solid white;\n }\"\"\"\n )\n button_layout.addWidget(radio_button, 1)\n radio_buttons[radio_button] = options[option]\n button_widget.setLayout(button_layout)\n layout.addWidget(button_widget)\n radio_widget.setLayout(layout)\n return (radio_widget, radio_buttons)\n\n\ndef generate_checkbox_widget(\n label_msg: str, tooltip_msg: str, init: bool, parent: QWidget\n) -> Tuple[QWidget, QCheckBox]:\n \"\"\"Creates a GUI form for transformation option checkbox entry, displaying\n a left-justified label next to some binary checkbox that can be freely checked\n or unchecked by users.\n\n Args:\n label_msg (str): The text message to label the checkbox with.\n tooltip_msg (str): The text message to display as a tooltip on the label\n and checkbox.\n init (bool): Whether the checkbox should initially be checked or not.\n parent (QWidget): The parent widget that this widget will be placed in.\n\n Returns:\n Tuple[QWidget, QCheckBox]: Returns a tuple, where the first item is the\n created widget, and the second is the exact checkbox UI element to allow\n you to access a user-entered value.\n \"\"\"\n checkbox_widget = QWidget(parent)\n layout = QHBoxLayout(checkbox_widget)\n layout.setSpacing(20)\n layout.setContentsMargins(0, 0, 0, 0)\n label = QLabel(label_msg, checkbox_widget)\n label.setFont(QFont(Df.DEFAULT_FONT, 12))\n label.setToolTip(tooltip_msg)\n QToolTip.setFont(QFont(Df.DEFAULT_FONT, 
13))\n label.setStyleSheet(\"QLabel{color: #727463;}\\n\" + Df.GENERAL_TOOLTIP_CSS)\n layout.addWidget(label, alignment=Qt.AlignmentFlag.AlignCenter)\n checkbox = QCheckBox(checkbox_widget)\n checkbox.setFont(QFont(Df.DEFAULT_FONT, 12))\n checkbox.setStyleSheet(\n \"\"\"\n QCheckBox{\n color: #727463\n }\"\"\"\n )\n checkbox.setChecked(init)\n layout.addWidget(checkbox, alignment=Qt.AlignmentFlag.AlignCenter)\n checkbox_widget.setLayout(layout)\n return (checkbox_widget, checkbox)\n\n\ndef generate_checkboxes_widget(\n label_msg: str,\n tooltip_msg: str,\n options: Mapping[str, Any],\n init_vals: Iterable[str],\n parent: QWidget,\n option_tooltips: Mapping[str, str] | None = None,\n) -> Tuple[QWidget, dict[QCheckBox, Any]]:\n \"\"\"Creates a GUI form for a grouped set of checkbox entries for a transformation\n option, to allow users to select any subset of options from a specific set, as\n each option can be freely checked or unchecked by users (unlike radio buttons).\n\n Args:\n label_msg (str): The text message to label the entire selection with.\n tooltip_msg (str): The text message to display as a tooltip on the top label.\n options (Mapping[str, Any]): The set of possible checkbox options, mapping\n their string label to some value that they should be used to represent.\n init_vals (Iterable[str]): The set of string labels that should be used as\n checkboxes in the selection.\n parent (QWidget): The parent widget that this widget will be placed in.\n option_tooltips (Mapping[str, str] | None): An optional argument allowing\n tooltips to be assigned to each individual checkbox label. Defaults to None.\n\n Returns:\n Tuple[QWidget, dict[QCheckbox, Any]]: Returns a tuple, where the first item\n is the created widget, and the second is a dictionary of created checkboxes \n which can be checked to determine the user input option subset.\n \"\"\"\n labelled_widget = QWidget(parent)\n layout = QVBoxLayout(labelled_widget)\n layout.setSpacing(0)\n layout.setContentsMargins(0, 0, 0, 0)\n label = QLabel(label_msg, labelled_widget)\n label.setFont(QFont(Df.DEFAULT_FONT, 12))\n label.setToolTip(tooltip_msg)\n QToolTip.setFont(QFont(Df.DEFAULT_FONT, 13))\n label.setStyleSheet(\"QLabel{color: #727463;}\\n\" + Df.GENERAL_TOOLTIP_CSS)\n layout.addWidget(label, alignment=Qt.AlignmentFlag.AlignLeft)\n checkbox_widget = QWidget(labelled_widget)\n checkbox_layout = QVBoxLayout(checkbox_widget)\n checkbox_layout.setContentsMargins(15, 5, 5, 5)\n checkbox_layout.setSpacing(0)\n checkboxes = {}\n for option in options.keys():\n checkbox = QCheckBox(option, checkbox_widget)\n checkbox.setFont(QFont(Df.DEFAULT_FONT, 11))\n checkbox.setStyleSheet(\n Df.GENERAL_TOOLTIP_CSS\n + \"\"\"\n QCheckBox{\n color: #727463\n }\"\"\"\n )\n checkbox.setChecked(option in init_vals or options[option] in init_vals)\n if option_tooltips is not None and option in option_tooltips:\n checkbox.setToolTip(option_tooltips[option])\n checkbox_layout.addWidget(checkbox, 1, alignment=Qt.AlignmentFlag.AlignLeft)\n checkboxes[checkbox] = options[option]\n checkbox_widget.setLayout(checkbox_layout)\n layout.addWidget(checkbox_widget)\n labelled_widget.setLayout(layout)\n return (labelled_widget, checkboxes)\n\n\nclass GuiIdentityUnit(IdentityUnit):\n \"\"\"The identity transformation with added graphical interfaces.\"\"\"\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing an identity transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n 
set_no_options_widget(parent)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual identity\n unit. In this case, no such options exist.\"\"\"\n return\n\n def from_json(json_str: str) -> \"GuiIdentityUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiIdentityUnit.\n\n Returns:\n GuiIdentityUnit: The corresponding GuiIdentityUnit object.\"\"\"\n return GuiIdentityUnit()\n\n def get_gui() -> \"GuiIdentityUnit\":\n \"\"\"Creates an identity transformation loaded with default values to allow GUI\n interaction with the transformation.\n\n Returns:\n GuiIdentityUnit: the identity unit transformation with default values.\"\"\"\n return GuiIdentityUnit()\n\n\nclass GuiFuncArgumentRandomiseUnit(FuncArgumentRandomiseUnit):\n \"\"\"The function argument randomisation transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiFuncArgumentRandomiseUnit, extending the original\n transformation unit to also initialise entries and checkbox attributes.\n \"\"\"\n super(GuiFuncArgumentRandomiseUnit, self).__init__(*args, **kwargs)\n self.extra_args_entry = None\n self.probability_entry = None\n self.randomise_checkbox = None\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing a function argument\n randomisation transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n\n # Add an integer entry for the number of extra arguments\n extra_args, self.extra_args_entry = generate_integer_widget(\n \"Extra Args:\",\n \"The number of additional spurious arguments to add to each function.\\n\"\n \"This must be an integer >= 0. If 0 is selected, then the argument\\n\"\n \"list will just be randomised but not otherwise modified.\",\n self.extra_args,\n 0,\n 2147483647,\n parent,\n )\n layout.addWidget(extra_args, 1, alignment=Qt.AlignmentFlag.AlignTop)\n\n # Add a float entry for the probability of using existing variables\n probability, self.probability_entry = generate_float_widget(\n \"Probability:\",\n \"The probability that a function argument, linked to an extra spurious parameter\\n\"\n \"that has been added to a function's specification, will be filled using some\\n\"\n \"matching variable value from the program instead of some new constant value,\\n\"\n \"where this probability must be a number in the range 0 <= p <= 1. A probability\\n\"\n \"of 0 means all spurious arguments will be constants (less secure), a probability of\\n\"\n \"0.5 means a 50 percent chance of using a variable where a variable can be used, and\\n\"\n \"a 1.0 means that all spurious arguments will use defined variables where possible.\\n\"\n \" This allows you to achieve a mixture of constants and variables for security.\",\n self.probability,\n 0.0,\n 1.0,\n parent,\n )\n layout.addWidget(probability, alignment=Qt.AlignmentFlag.AlignTop)\n\n # Add a checkbox for randomising the order of function arguments.\n randomise, self.randomise_checkbox = generate_checkbox_widget(\n \"Randomise Arg Order?\",\n \"Where possible, randomises the order of arguments in function definitions and calls such\\n\"\n \"that any abstract symbolic meaning implied by their order is lost. This is particularly\\n\"\n \"effective when combined with several additional new arguments, as it can cause the real\\n\"\n \"arguments to become lost. 
When combined with opaque predicate insertion/augmentation,\\n\"\n \"these fake arguments cannot be automatically removed using their usage information,\\n\"\n \"creating a very effective obfuscation tool.\",\n self.randomise,\n parent,\n )\n layout.addWidget(randomise, 1, alignment=Qt.AlignmentFlag.AlignTop)\n layout.addStretch(3)\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual function\n argument randomisation unit. In this case, we load the number of extra\n arguments, the probability, and whether to randomise the order or not.\"\"\"\n # Load the number of extra arguments (>= 0, default 3)\n if self.extra_args_entry is not None:\n try:\n self.extra_args = int(self.extra_args_entry.text())\n if self.extra_args < 0:\n self.extra_args = 0\n self.traverser.extra = self.extra_args\n except:\n self.extra_args = 3\n self.traverser.extra = 3\n # Load the probability (0.0 <= p <= 1.0, default 0.75)\n if self.probability_entry is not None:\n try:\n self.probability = float(self.probability_entry.text())\n if self.probability > 1.0:\n self.probability = 1.0\n elif self.probability < 0.0:\n self.probability = 0.0\n self.traverser.variable_probability = self.probability\n except:\n self.probability = 0.75\n self.traverser.variable_probability = self.probability\n # Load whether to randomise or not.\n if self.randomise_checkbox is not None:\n self.randomise = self.randomise_checkbox.isChecked()\n self.traverser.randomise = self.randomise\n\n def from_json(json_str: str) -> \"GuiFuncArgumentRandomiseUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiFuncArgumentRandomiseUnit.\n\n Returns:\n GuiFuncArgumentRandomiseUnit: The corresponding GuiFuncArgumentRandomiseUnit object.\"\"\"\n unit = FuncArgumentRandomiseUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiFuncArgumentRandomiseUnit(\n unit.extra_args, unit.probability, unit.randomise\n )\n\n def get_gui() -> \"GuiFuncArgumentRandomiseUnit\":\n \"\"\"Creates a function argument randomisation transformation loaded with default\n values to allow GUI interaction with the transformation.\n\n Returns:\n GuiFuncArgumentRandomiseUnit: the function argument randomisation transformation\n with default values.\"\"\"\n return GuiFuncArgumentRandomiseUnit(3, 0.75, True)\n\n\nclass GuiStringEncodeUnit(StringEncodeUnit):\n \"\"\"The string literal encoding transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiStringEncodeUnit, extending the original\n transformation unit to also initialise attributes for the style buttons.\n \"\"\"\n super(GuiStringEncodeUnit, self).__init__(*args, **kwargs)\n self.style_buttons = None\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing a string encoding transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n # Add radio buttons for selecting the string encoding style\n style, self.style_buttons = generate_radio_button_widget(\n \"Encoding Style:\",\n \"The encoding style to use when encoding strings in the program, which\\n\"\n \"dictates how it is chosen what encodings characters are replaced with.\",\n {style.value: style for style in 
StringEncodeTraverser.Style},\n self.style.value,\n parent,\n {\n StringEncodeTraverser.Style.OCTAL.value: \"Encode each character as its octal (base-8) representation where possible.\\n\"\n ' e.g. \"hello\" -> \"\\\\150\\\\145\\\\154\\\\154\\\\157\".',\n StringEncodeTraverser.Style.HEX.value: \"Encode each character as its hexadecimal (base-16) representation where possible.\\n\"\n ' e.g. \"hello\" -> \"\\\\x68\\\\x65\\\\x6c\\\\x6c\\\\x6f\".',\n StringEncodeTraverser.Style.MIXED.value: \"Encode each character as either its octal (base-8) or hexadecimal (base-16)\\n\"\n \"representation where possible, choosing randomly between the two.\\n\"\n ' e.g. \"hello\" -> \"\\\\x68\\\\145\\\\154\\\\x6c\\\\157\".',\n StringEncodeTraverser.Style.ALL.value: \"Encode each character as either itself (no change), its octal (base-8) representation\\n\"\n \"or its hexadecimal (base-16) representation, choosing randomly between all 3 options.\\n\"\n ' e.g. \"hello\" -> \"\\\\150e\\\\x6cl\\\\x6f\".',\n },\n )\n layout.addWidget(style, 1, alignment=Qt.AlignmentFlag.AlignTop)\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual string\n encoding unit. In this case, we load the encoding style only.\"\"\"\n if self.style_buttons is not None and len(self.style_buttons) > 0:\n for button, style in self.style_buttons.items():\n if button.isChecked():\n self.style = style\n self.traverser.style = style\n break\n\n def from_json(json_str: str) -> \"GuiStringEncodeUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiStringEncodeUnit.\n\n Returns:\n GuiStringEncodeUnit: The corresponding GuiStringEncodeUnit object.\"\"\"\n unit = StringEncodeUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiStringEncodeUnit(unit.style)\n\n def get_gui() -> \"GuiStringEncodeUnit\":\n \"\"\"Creates a string encoding transformation loaded with default values to allow\n GUI interaction with the transformation.\n\n Returns:\n GuiStringEncodeUnit: the string encoding transformation with default values.\"\"\"\n return GuiStringEncodeUnit(StringEncodeTraverser.Style.MIXED)\n\n\nclass GuiIntegerEncodeUnit(IntegerEncodeUnit):\n \"\"\"The integer literal encoding transformation with added graphical interfaces.\"\"\"\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing an integer encoding transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n set_no_options_widget(parent)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual integer\n encoding transformation unit. 
In this case, no such options exist.\"\"\"\n return\n\n def from_json(json_str: str) -> \"GuiIntegerEncodeUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiIntegerEncodeUnit.\n\n Returns:\n GuiIntegerEncodeUnit: The corresponding GuiIntegerEncodeUnit object.\"\"\"\n unit = IntegerEncodeUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiIntegerEncodeUnit()\n\n def get_gui() -> \"GuiIntegerEncodeUnit\":\n \"\"\"Creates an integer encoding transformation loaded with default values to allow\n GUI interaction with the transformation.\n\n Returns:\n GuiIntegerEncodeUnit: the integer encoding transformation with default values.\"\"\"\n return GuiIntegerEncodeUnit()\n\n\nclass GuiIdentifierRenameUnit(IdentifierRenameUnit):\n \"\"\"The identifier renaming transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiIdentifierRenameUnit, extending the original\n transformation unit to also initialise attributes for the style buttons and\n a checkbox for minimising identifier usage.\n \"\"\"\n super(GuiIdentifierRenameUnit, self).__init__(*args, **kwargs)\n self.style_buttons = None\n self.minimise_idents_checkbox = None\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing an identifier renaming transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n\n # Add radio buttons for selecting the name generation style\n style, self.style_buttons = generate_radio_button_widget(\n \"Renaming Style:\",\n \"The renaming style to use when renaming identifiers throughout the\\n\"\n \"program, which dictates how new identifiers are chosen to replace\\n\"\n \"existing names.\",\n {style.value: style for style in IdentifierRenameUnit.Style},\n self.style.value,\n parent,\n {\n IdentifierRenameUnit.Style.COMPLETE_RANDOM.value: \"Generate new identifiers that are completely random strings of 4-19 characters.\\n\"\n \" e.g. tcEM7, aA_LsaUdhnh, YPWnW0XE.\",\n IdentifierRenameUnit.Style.ONLY_UNDERSCORES.value: \"Generate new identifiers that consist of solely the underscore character '_'.\\n\"\n \" e.g. _, _____, ________.\",\n IdentifierRenameUnit.Style.MINIMAL_LENGTH.value: \"Generate new identifiers that occupy the minimum space possible as a whole, by\\n\"\n \"iterating through available symbols sequentially.\\n\"\n \" e.g. a, b, c, d, e, ...\",\n IdentifierRenameUnit.Style.I_AND_L.value: \"Generate new identifiers that each comprise of the exact same amount of characters,\\n\"\n \"using random combinations of only the characters 'I' and 'l'. This makes it hard to\\n\"\n \"determine separate identifiers via. differences in length, as in e.g. underscore\\n\"\n \"renaming.\\n\"\n \" e.g. IllIIlIIlIlIIlll, llIIlIlIlllllIlI, lIIlllIllIIIIIII.\",\n },\n )\n layout.addWidget(style, 1, alignment=Qt.AlignmentFlag.AlignTop)\n\n # Add a checkbox for minimising identifier usage or not.\n minimise_idents, self.minimise_idents_checkbox = generate_checkbox_widget(\n \"Minimise Identifiers?\",\n \"Attempts to greedily re-use identifier names whenever possible, such that the minimum\\n\"\n \"number of unique names are used throughout the program, and the maximum number of\\n\"\n \"different programming constructs are named the same thing. 
This option exploits variable\\n\"\n \"shadowing within scopes, the different naming systems of labels/structures and other\\n\"\n \"constructs, and analysis of identifier usage and liveness. [WARNING: VERY EXPERIMENTAL].\",\n self.minimise_idents,\n parent,\n )\n layout.addWidget(minimise_idents, 1, alignment=Qt.AlignmentFlag.AlignTop)\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual identifier\n renaming unit. In this case, we load the name generation style and whether to\n use minimial identifiers or not.\"\"\"\n # Load the name generation style\n if self.style_buttons is not None and len(self.style_buttons) > 0:\n for button, style in self.style_buttons.items():\n if button.isChecked():\n self.style = style\n break\n # Load whether to minimise identifier usage or not\n if self.minimise_idents_checkbox is not None:\n self.minimise_idents = self.minimise_idents_checkbox.isChecked()\n\n def from_json(json_str: str) -> \"GuiIdentifierRenameUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiIdentifierRenameUnit.\n\n Returns:\n GuiIdentifierRenameUnit: The corresponding GuiIdentifierRenameUnit object.\"\"\"\n unit = IdentifierRenameUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiIdentifierRenameUnit(unit.style, unit.minimise_idents)\n\n def get_gui() -> \"GuiIdentifierRenameUnit\":\n \"\"\"Creates an identifier renaming transformation loaded with default values to allow\n GUI interaction with the transformation.\n\n Returns:\n GuiIdentifierRenameUnit: the identifier renaming transformation with default values.\"\"\"\n return GuiIdentifierRenameUnit(\n IdentifierRenameUnit.Style.COMPLETE_RANDOM, False\n )\n\n\nclass GuiArithmeticEncodeUnit(ArithmeticEncodeUnit):\n \"\"\"The integer arithmetic encoding transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiArithmeticEncodeUnit, extending the original\n transformation unit to also initialise attributes for the encoding depth entry.\n \"\"\"\n super(GuiArithmeticEncodeUnit, self).__init__(*args, **kwargs)\n self.depth_entry = None\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing an arithmetic encoding transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n\n # Add an integer entry for inputting the recursive depth.\n depth, self.depth_entry = generate_integer_widget(\n \"Recursive Depth:\",\n \"The maximum recursive depth of performed arithmetic encoding. Arithmetic\\n\"\n \"operations within encoded arithmetic operations can be recursively encoded\\n\"\n \"to increase code complexity. This must be at least 0 (which does nothing),\\n\"\n \"but a value > 5 is not recommended due to the large potential slowdown, as\\n\"\n \"code exponentially increases in size.\",\n self.level,\n 0,\n 2147483647,\n parent,\n )\n layout.addWidget(depth, 1, alignment=Qt.AlignmentFlag.AlignTop)\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual arithmetic\n encoding unit. 
In this case, we load the recursive encoding depth.\"\"\"\n # Load the encoding depth (>= 0, default 1)\n if self.depth_entry is not None:\n try:\n self.level = int(self.depth_entry.text())\n if self.level < 0:\n self.level = 0\n self.traverser.transform_depth = self.level\n except:\n self.level = 1\n self.traverser.transform_depth = 1\n\n def from_json(json_str: str) -> None:\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiArithmeticEncodeUnit.\n\n Returns:\n GuiArithmeticEncodeUnit: The corresponding GuiArithmeticEncodeUnit object.\"\"\"\n unit = ArithmeticEncodeUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiArithmeticEncodeUnit(unit.level)\n\n def get_gui() -> \"GuiArithmeticEncodeUnit\":\n \"\"\"Creates an integer arithmetic encoding transformation loaded with default values\n to allow GUI interaction with the transformation.\n\n Returns:\n GuiArithmeticEncodeUnit: the arithmetic encoding transformation with default values.\"\"\"\n return GuiArithmeticEncodeUnit(1)\n\n\nclass GuiAugmentOpaqueUnit(AugmentOpaqueUnit):\n \"\"\"The opaque augmentation transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiAugmentOpaqueUnit, extending the original\n transformation unit to also initialise attributes for the style checkboxes,\n and the probability and number entries.\n \"\"\"\n super(GuiAugmentOpaqueUnit, self).__init__(*args, **kwargs)\n self.style_checkboxes = None\n self.probability_entry = None\n self.number_entry = None\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing an opaque predicate augmentation\n transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n\n # Add checkboxes for selecting opaque predicate argument generation styles\n tooltips = {\n OpaqueAugmenter.Style.INPUT.value: \"Opaque predicates can be generated using user inputs (function parameters).\",\n OpaqueAugmenter.Style.ENTROPY.value: \"Opaque predicates can be generated using entropic (random) variables, which\\n\"\n \"are created globally and initialised with random values at the start of the\\n\"\n \"main() function. 
In the current implementation, for every random variable\\n\"\n \"that is needed, it is decided at random whether to use an existing variable\\n\"\n \"or to make a new one (25 percent chance), to create good diversity and increase\\n\"\n \"complexity throughout the program.\",\n }\n styles, self.style_checkboxes = generate_checkboxes_widget(\n \"Predicate Style:\",\n \"The opaque predicate generation styles that can be used by the program.\\n\"\n \"This simply refers to the types of inputs that can be utilised to make\\n\"\n \"opaque predicates, such as using user input (function parameters) or\\n\"\n \"using random variables (entropy).\",\n {\n \" \".join(style.value.split(\" \")[3:]).capitalize(): style\n for style in OpaqueAugmenter.Style\n },\n set(self.styles),\n parent,\n dict(\n (\" \".join(key.split(\" \")[3:]).capitalize(), val)\n for key, val in tooltips.items()\n ),\n )\n layout.addWidget(styles, alignment=Qt.AlignmentFlag.AlignTop)\n\n # Add a float entry for selecting the probability of augmenting conditionals\n probability, self.probability_entry = generate_float_widget(\n \"Probability:\",\n \"The probability that a conditional will be augmented with an opaque predicate,\\n\"\n \"which must be a number in the range 0 <= p <= 1. A probability of 0 means that\\n\"\n \"no augmentations will occur, a probability of 0.5 means approximately half of\\n\"\n \"the program's conditionals will be augmented with opaque predicates, and 1.0\\n\"\n \"means that where possible, all conditionals will be augmented. This allows you\\n\"\n \"to achieve a mixture of augmented and non-augmented conditionals.\",\n self.probability,\n 0.0,\n 1.0,\n parent,\n )\n layout.addWidget(probability, alignment=Qt.AlignmentFlag.AlignTop)\n\n # Add an integer entry for selecting the number of predicates to add\n number, self.number_entry = generate_integer_widget(\n \"Number of predicates:\",\n \"The number of opaque predicates that will be used to augment any probabilistically\\n\"\n \"selected conditional statement, which must be some integer >= 0. A number of 0\\n\"\n \"means that no opaque predicates will be added. A number of 5 means that 5 opaque\\n\"\n \"predicates will be used to augment each chosen conditional.\\n\"\n \"Typically, n=1 is sufficient for most use cases.\",\n self.number,\n 0,\n 2147483647,\n parent,\n )\n layout.addWidget(number, alignment=Qt.AlignmentFlag.AlignTop)\n layout.addStretch()\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual opaque\n predicate augmentation unit. 
In this case, we load the valid predicate argument\n generation styles, the probabiltiy of augmenting existing conditionals, and\n the number of opaque predicates to augment each conditional with.\"\"\"\n # Load the set of selected valid predicate argument styles.\n if self.style_checkboxes is not None and len(self.style_checkboxes) > 0:\n self.styles = [\n s for cbox, s in self.style_checkboxes.items() if cbox.isChecked()\n ]\n self.traverser.styles = self.styles\n # Load the augmenting probability (0.0 <= p <= 1.0, default 0.75)\n if self.probability_entry is not None:\n try:\n self.probability = float(self.probability_entry.text())\n if self.probability > 1.0:\n self.probability = 1.0\n elif self.probability < 0.0:\n self.probability = 0.0\n self.traverser.probability = self.probability\n except:\n self.probability = 0.75\n self.traverser.probability = self.probability\n # Load the number of predicates to add per conditional (>= 0, default 1)\n if self.number_entry is not None:\n try:\n self.number = int(self.number_entry.text())\n if self.number < 0:\n self.number = 0\n self.traverser.number = self.number\n except:\n self.number = 1\n self.traverser.number = 1\n\n def from_json(json_str: str) -> \"GuiAugmentOpaqueUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiAugmentOpaqueUnit.\n\n Returns:\n GuiAugmentOpaqueUnit: The corresponding GuiAugmentOpaqueUnit object.\"\"\"\n unit = AugmentOpaqueUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiAugmentOpaqueUnit(unit.styles, unit.probability, unit.number)\n\n def get_gui() -> \"GuiAugmentOpaqueUnit\":\n \"\"\"Creates an opaque predicate augmentation transformation loaded with default\n values to allow GUI interaction with the transformation.\n\n Returns:\n GuiAugmentOpaqueUnit: the opaque predicate augmentation transformation with\n default values.\"\"\"\n return GuiAugmentOpaqueUnit([s for s in OpaqueAugmenter.Style], 1.0, 1)\n\n\nclass GuiInsertOpaqueUnit(InsertOpaqueUnit):\n \"\"\"The opaque insertion transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiInsertOpaqueUnit, extending the original\n transformation unit to also initialise attributes for the style, granularity\n and kind checkboxes, as well as an entry for the number of insertions.\n \"\"\"\n super(GuiInsertOpaqueUnit, self).__init__(*args, **kwargs)\n self.style_checkboxes = None\n self.granularity_checkboxes = None\n self.kind_checkboxes = None\n self.number_entry = None\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing an opaque predicate\n insertion transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n # Initialise the layout design to allow scrolling, as there are a lot of options.\n layout = QVBoxLayout(parent)\n layout.setContentsMargins(0, 10, 0, 10)\n scroll_widget = QScrollArea(parent)\n scroll_widget.setStyleSheet(\n \"\"\"\n QScrollArea{\n background-color: transparent;\n border: none;\n }\"\"\"\n + Df.MINIMAL_SCROLL_BAR_CSS\n )\n scroll_widget.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAsNeeded)\n scroll_widget.setHorizontalScrollBarPolicy(\n Qt.ScrollBarPolicy.ScrollBarAlwaysOff\n )\n scroll_widget.setWidgetResizable(True)\n scroll_content = QWidget(scroll_widget)\n 
scroll_content.setObjectName(\"ScrollWidget\")\n scroll_content.setStyleSheet(\n \"\"\"\n QWidget#ScrollWidget{\n background-color: transparent;\n border: none;\n }\"\"\"\n )\n scroll_content.layout = QVBoxLayout(scroll_content)\n scroll_content.layout.setContentsMargins(0, 0, 7, 0)\n scroll_content.setSizePolicy(\n QSizePolicy.Policy.Maximum, QSizePolicy.Policy.Maximum\n )\n scroll_widget.setWidget(scroll_content)\n layout.addWidget(scroll_widget)\n\n # Add a set of checkboxes for selecting opaque predicate argument generation styles\n tooltips = {\n OpaqueAugmenter.Style.INPUT.value: \"Opaque predicates can be generated using user inputs (function parameters).\",\n OpaqueAugmenter.Style.ENTROPY.value: \"Opaque predicates can be generated using entropic (random) variables, which\\n\"\n \"are created globally and initialised with random values at the start of the\\n\"\n \"main() function. In the current implementation, for every random variable\\n\"\n \"that is needed, it is decided at random whether to use an existing variable\\n\"\n \"or to make a new one (25 percent chance), to create good diversity and increase\\n\"\n \"complexity throughout the program.\",\n }\n styles, self.style_checkboxes = generate_checkboxes_widget(\n \"Predicate Style:\",\n \"The opaque predicate generation styles that can be used by the program.\\n\"\n \"This simply refers to the types of inputs that can be utilised to make\\n\"\n \"opaque predicates, such as using user input (function parameters) or\\n\"\n \"using random variables (entropy).\",\n {\n \" \".join(style.value.split(\" \")[3:]).capitalize(): style\n for style in OpaqueInserter.Style\n },\n set(self.styles),\n parent,\n dict(\n (\" \".join(key.split(\" \")[3:]).capitalize(), val)\n for key, val in tooltips.items()\n ),\n )\n scroll_content.layout.addWidget(styles, alignment=Qt.AlignmentFlag.AlignTop)\n\n # Add a set of checkboxes for selecing insertion granularities\n granularities, self.granularity_checkboxes = generate_checkboxes_widget(\n \"Predicate Granularities:\",\n \"Opaque predicate granularities refer to the 'scope'/'size' of the program components\\n\"\n \"that are modified by the new conditional. For example, we can have conditionals that\\n\"\n \"apply to an entire function body (procedural granularity), conditionals that apply to\\n\"\n \"a contiguous sequence of multiple statements (block granularity), or new conditionals\\n\"\n \"that apply to singular program statements (statement granularity). Selecting a mixture\\n\"\n \"of these allows you to greatly increase program diversity.\",\n {g.value.split(\":\")[0].capitalize(): g for g in OpaqueInserter.Granularity},\n set(self.granularities),\n parent,\n {\n OpaqueInserter.Granularity.PROCEDURAL.value.split(\":\")[\n 0\n ].capitalize(): \"Generate new opaque predicate conditionals that encapsulate the entire\\n\"\n \"function body.\",\n OpaqueInserter.Granularity.BLOCK.value.split(\":\")[\n 0\n ].capitalize(): \"Generate new opaque predicate conditionals that encapsulate contiguous\\n\"\n \"sequences of statements (i.e. 'blocks' of code) within the function. These\\n\"\n \"blocks are chosen entirely at random, and are of random length.\",\n OpaqueInserter.Granularity.STMT.value.split(\":\")[\n 0\n ].capitalize(): \"Generate new opaque predicate conditionals that encapsulate singular\\n\"\n \"program statements within the function. 
These statements are chosen entirely\\n\"\n \"at random from those within the function body.\",\n },\n )\n scroll_content.layout.addWidget(\n granularities, alignment=Qt.AlignmentFlag.AlignTop\n )\n\n # Add a set of checkboxes for selecting insertion kinds/formats/structures\n kinds, self.kind_checkboxes = generate_checkboxes_widget(\n \"Predicate Kinds:\",\n \"The kinds (formats) of opaque predicate conditionals that will be inserted. This\\n\"\n \"increases obfuscation diversity by inserting opaque predicates using different\\n\"\n \"programming constructs and logical structures. For example, one kind might evaluate\\n\"\n \"the real code on an else branch of an if statement, whereas another might evaluate\\n\"\n \"buggy code within a while loop.\",\n {\n k.value.split(\":\")[0].replace(\"_\", \" \").capitalize(): k\n for k in OpaqueInserter.Kind\n },\n set(self.kinds),\n parent,\n dict(\n (\n k.value.split(\":\")[0].replace(\"_\", \" \").capitalize(),\n \"Enable construction of opaque predicate conditionals with the form\\n\"\n \" \" + k.value.split(\":\")[1].strip(),\n )\n for k in OpaqueInserter.Kind\n ),\n )\n scroll_content.layout.addWidget(kinds, alignment=Qt.AlignmentFlag.AlignTop)\n\n # Add an integer entry for selecting the number of conditionals to insert.\n number, self.number_entry = generate_integer_widget(\n \"Number per function:\",\n \"The number of new opaque predicates to add to each individual function (where\\n\"\n \"possible). Controlling this value allows you to control the degree to which the\\n\"\n \"program is obfuscated. A value of 1 <= n <= 10 is recommended, though this\\n\"\n \"depends on the kinds that you use, as some insertions can exponentially increase\\n\"\n \"the program size (notably, the 'EITHER' predicate type applied with the\\n\"\n \"'PROCEDURE' granularity will copy the function body each time it is applied\\n\"\n \"(doubling the program size).\",\n self.number,\n 0,\n 2147483647,\n parent,\n )\n scroll_content.layout.addWidget(number, alignment=Qt.AlignmentFlag.AlignTop)\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual opaque\n predicate insertion unit. 
In this case, we load the valid predicate argument\n generation styles, the conditional insertion granularities, the conditional\n kinds/structures/formates, and the number of conditionals to attempt to add.\"\"\"\n # Load the valid opaque predicate argument generation styles.\n if self.style_checkboxes is not None and len(self.style_checkboxes) > 0:\n self.styles = [\n s for cbox, s in self.style_checkboxes.items() if cbox.isChecked()\n ]\n self.traverser.styles = self.styles\n # Load the valid conditional insertion granularities\n if (\n self.granularity_checkboxes is not None\n and len(self.granularity_checkboxes) > 0\n ):\n self.granularities = [\n g for cbox, g in self.granularity_checkboxes.items() if cbox.isChecked()\n ]\n self.traverser.granularities = self.granularities\n # Load the vaid conditional kinds/formats/structures\n if self.kind_checkboxes is not None and len(self.kind_checkboxes) > 0:\n self.kinds = [\n k for cbox, k in self.kind_checkboxes.items() if cbox.isChecked()\n ]\n self.traverser.kinds = self.kinds\n # Load the number of insertions to attempt per function (>= 0, default 5)\n if self.number_entry is not None:\n try:\n self.number = int(self.number_entry.text())\n if self.number < 0:\n self.number = 0\n self.traverser.number = self.number\n except:\n self.number = 5\n self.traverser.number = 5\n\n def from_json(json_str: str) -> \"GuiInsertOpaqueUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiInsertOpaqueUnit.\n\n Returns:\n GuiInsertOpaqueUnit: The corresponding GuiInsertOpaqueUnit object.\"\"\"\n unit = InsertOpaqueUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiInsertOpaqueUnit(\n unit.styles, unit.granularities, unit.kinds, unit.number\n )\n\n def get_gui() -> \"GuiInsertOpaqueUnit\":\n \"\"\"Creates an opaque predicate insertion transformation loaded with default\n values to allow GUI interaction with the transformation.\n\n Returns:\n GuiInsertOpaqueUnit: the opaque predicate insertion transformation with\n default values.\"\"\"\n return GuiInsertOpaqueUnit(\n [s for s in OpaqueInserter.Style],\n [g for g in OpaqueInserter.Granularity],\n [k for k in OpaqueInserter.Kind],\n 5,\n )\n\n\nclass GuiControlFlowFlattenUnit(ControlFlowFlattenUnit):\n \"\"\"The control flow flattening transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiControlFlowFlattenUnit, extending the original\n transformation unit to also initialise attributes for the style buttons and\n a checkbox for case randomisation.\n \"\"\"\n super(GuiControlFlowFlattenUnit, self).__init__(*args, **kwargs)\n self.style_buttons = None\n self.randomise_cases_checkbox = None\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing a control flow flattening\n transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n\n # Add radio buttons for selecting the case generation style.\n style, self.style_buttons = generate_radio_button_widget(\n \"Case Expression Style:\",\n \"The generation style to use when creating new cases for flattened\\n\"\n \"blocks in the control flow flattening procedure, dictating how cases\\n\"\n \"are labelled and transitioned between.\",\n {style.value: style for style in 
ControlFlowFlattener.Style},\n self.style.value,\n parent,\n {\n ControlFlowFlattener.Style.SEQUENTIAL.value: \"Generate new cases with sequentially generated integer expressions, e.g.\\n\"\n \" case 0: ...\\n\"\n \" case 1: ...\\n\"\n \" case 2: ...\\n\"\n \"etc.\",\n ControlFlowFlattener.Style.RANDOM_INT.value: \"Generate new cases with random integer expressions, e.g.\\n\"\n \" case 12: ...\\n\"\n \" case 6: ...\\n\"\n \" case -37: ...\\n\"\n \"etc.\",\n ControlFlowFlattener.Style.ENUMERATOR.value: \"Generate new cases as enumerator values, e.g.\\n\"\n \" enum x = {ABC, DEF, GHI}\\n\"\n \" switch (x) {\\n\"\n \" case ABC: ...\\n\"\n \" case DEF: ...\\n\"\n \" case GHI: ...\\n\"\n \" }\\n\"\n \"etc.\",\n },\n )\n layout.addWidget(style, 1, alignment=Qt.AlignmentFlag.AlignTop)\n\n # Add a checkbox for selecting whether to randomise the case order or not.\n randomise_cases, self.randomise_cases_checkbox = generate_checkbox_widget(\n \"Randomise Case Order?\",\n \"Randomises the order within which cases are dispatched within switch statements during\\n\"\n \"control flow flattening, such that it is more difficult to follow the code's original\\n\"\n \"sequential structure by reading through cases sequentially.\",\n self.randomise_cases,\n parent,\n )\n layout.addWidget(randomise_cases, 1, alignment=Qt.AlignmentFlag.AlignTop)\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual control\n flow flattenign unit. In this case, we load the case generation style to\n be used, as well as whether to randomise the case order or not.\"\"\"\n # Load the case generation style to use (selected via radio button)\n if self.style_buttons is not None and len(self.style_buttons) > 0:\n for button, style in self.style_buttons.items():\n if button.isChecked():\n self.style = style\n self.traverser.style = style\n break\n # Load whether to randomise the order of cases or not.\n if self.randomise_cases_checkbox is not None:\n self.randomise_cases = self.randomise_cases_checkbox.isChecked()\n self.traverser.randomise_cases = self.randomise_cases\n\n def from_json(json_str: str) -> \"GuiControlFlowFlattenUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiControlFlowFlattenUnit.\n\n Returns:\n GuiControlFlowFlattenUnit: The corresponding GuiControlFlowFlattenUnit object.\"\"\"\n unit = ControlFlowFlattenUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiControlFlowFlattenUnit(unit.randomise_cases, unit.style)\n\n def get_gui() -> \"GuiControlFlowFlattenUnit\":\n \"\"\"Creates a control flow flattening transformation loaded with default values\n to allow GUI interaction with the transformation.\n\n Returns:\n GuiControlFlowFlattenUnit: the control flow flattening transformation with\n default values.\"\"\"\n return GuiControlFlowFlattenUnit(False, ControlFlowFlattener.Style.SEQUENTIAL)\n\n\nclass GuiReverseIndexUnit(ReverseIndexUnit):\n \"\"\"The index reversing transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiReverseIndexUnit, extending the original\n transformation unit to also initialise attributes for the probability entry.\n \"\"\"\n super(GuiReverseIndexUnit, self).__init__(*args, **kwargs)\n self.probability_entry = None\n\n def edit_gui(self, 
parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing an array index reversal\n transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n\n # Add a float entry for entering the probability of reversing indexes.\n probability, self.probability_entry = generate_float_widget(\n \"Probability:\",\n \"The probability that an index reversal will take place, which must be a number\\n\"\n \"in the range 0 <= p <= 1. A probability of 0 means that no reversals will\\n\"\n \"occur, a probability of 0.5 means approximately half of the indexing operations\\n\"\n \"will be encoded, and 1.0 means all indexing operations are encoded. This allows\\n\"\n \"you to achieve a mix of reversed and non-reversed indexes for maximal obfuscation.\",\n self.probability,\n 0.0,\n 1.0,\n parent,\n )\n layout.addWidget(probability, 1, alignment=Qt.AlignmentFlag.AlignTop)\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual array\n index reversal unit. In this case, we load the independent probability that\n an array index will be reversed.\"\"\"\n # Load the reversal probability (0.0 <= p <= 1.0, default 0.8)\n if self.probability_entry is not None:\n try:\n self.probability = float(self.probability_entry.text())\n if self.probability > 1.0:\n self.probability = 1.0\n elif self.probability < 0.0:\n self.probability = 0.0\n self.traverser.probability = self.probability\n except:\n self.probability = 0.8\n self.traverser.probability = 0.8\n\n def from_json(json_str: str) -> \"GuiReverseIndexUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiReverseIndexUnit.\n\n Returns:\n GuiReverseIndexUnit: The corresponding GuiReverseIndexUnit object.\"\"\"\n unit = ReverseIndexUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiReverseIndexUnit(unit.probability)\n\n def get_gui() -> \"GuiReverseIndexUnit\":\n \"\"\"Creates an array index reversal transformation loaded with default values to\n allow GUI interaction with the transformation.\n\n Returns:\n GuiReverseIndexUnit: the index reversal transformation with default values.\"\"\"\n return GuiReverseIndexUnit(0.8)\n\n\nclass GuiClutterWhitespaceUnit(ClutterWhitespaceUnit):\n \"\"\"The whitespace cluttering transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiClutterWhitespaceUnit, extending the original\n transformation unit to also initialise attributes for the target length\n entry, as well as a checkbox for padding lines.\n \"\"\"\n super(GuiClutterWhitespaceUnit, self).__init__(*args, **kwargs)\n self.target_length_entry = None\n self.pad_lines_checkbox = None\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing a whitespace cluttering\n transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n\n # Add an integer entry for the target positive line length to achieve.\n target_length, self.target_length_entry = generate_integer_widget(\n \"Target Line Length:\",\n \"The target maximum line length to aim to achieve when altering whitespace.\\n\"\n \"The line length will always be less than or equal 
to this, unless a single\\n\"\n \"token is greater than this length (e.g. a very long variable name). If the\\n\"\n \"padding option is set, this is the length that will be padded towards.\",\n self.target_length,\n 0,\n 2147483647,\n parent,\n )\n layout.addWidget(target_length, alignment=Qt.AlignmentFlag.AlignTop)\n layout.addSpacing(4)\n\n # Add a checkbox for whether to pad lines to achieve the target line length or not.\n pad_lines, self.pad_lines_checkbox = generate_checkbox_widget(\n \"Add Line Padding?\",\n \"Where possible, this pads lines by inserting extra spaces between tokens, such that all\\n\"\n \"lines (except those with pre-processor directives) are padded to the set target length.\",\n self.pad_lines,\n parent,\n )\n layout.addWidget(pad_lines, alignment=Qt.AlignmentFlag.AlignTop)\n layout.addStretch()\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual whitespace\n cluttering unit. In this case, we load the target line length to be achieved,\n and whether to pad tokens to achieve that line length or not.\"\"\"\n # Load the target line length (>= 0, default 3)\n if self.target_length_entry is not None:\n try:\n self.target_length = int(self.target_length_entry.text())\n if self.target_length < 0:\n self.target_length = 0\n except:\n self.target_length = 3\n # Load whether to pad lines to achieve the target length or not\n if self.pad_lines_checkbox is not None:\n self.pad_lines = self.pad_lines_checkbox.isChecked()\n\n def from_json(json_str: str) -> \"GuiClutterWhitespaceUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiClutterWhitespaceUnit.\n\n Returns:\n GuiClutterWhitespaceUnit: The corresponding GuiClutterWhitespaceUnit object.\"\"\"\n unit = ClutterWhitespaceUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiClutterWhitespaceUnit(unit.target_length, unit.pad_lines)\n\n def get_gui() -> \"GuiClutterWhitespaceUnit\":\n \"\"\"Creates a whitespace cluttering transformation loaded with default values to\n allow GUI interaction with the transformation.\n\n Returns:\n GuiCluterWhitespaceUnit: the whitespace cluttering transformation with default\n values.\"\"\"\n return GuiClutterWhitespaceUnit(100, True)\n\n\nclass GuiDiTriGraphEncodeUnit(DiTriGraphEncodeUnit):\n \"\"\"The digraph/trigraph encoding transformation with added graphical interfaces.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"The constructor for the GuiDiTriGraphEncodeUnit, extending the original\n transformation unit to also initialise attributes for the style buttons and\n the probability entry.\n \"\"\"\n super(GuiDiTriGraphEncodeUnit, self).__init__(*args, **kwargs)\n self.style_buttons = None\n self.probability_entry = None\n\n def edit_gui(self, parent: QWidget) -> None:\n \"\"\"Implements a graphical interface for editing a digraph and trigraph\n encoding transformation.\n\n Args:\n parent (QWidget): The parent widget to place this widget into.\n \"\"\"\n layout = QVBoxLayout(parent)\n\n # Add radio buttons for selecting the encoding style to use.\n style, self.style_buttons = generate_radio_button_widget(\n \"Encoding Style:\",\n \"The encoding stylt to use when replacing symbols throughout the program\\n\"\n \"body, which dictates how new macros are chosen to replace existing symbols.\",\n 
{style.value: style for style in DiTriGraphEncodeUnit.Style},\n self.style.value,\n parent,\n {\n DiTriGraphEncodeUnit.Style.DIGRAPH.value: \"Replace symbols []{}# with corresponding two-letter digraphs.\\n\"\n ' e.g. \"[\" ---> \"<:\".',\n DiTriGraphEncodeUnit.Style.TRIGRAPH.value: \"Replace symbols []{}#\\\\^|~ with corresponding three-letter digraphs.\\n\"\n ' e.g. \"[\" ---> \"??(\".',\n DiTriGraphEncodeUnit.Style.MIXED.value: \"Replace symbols with corresponding two-letter digraphs or three-letter\\n\"\n \"digraphs, chosen between randomly with equal probability.\",\n },\n )\n layout.addWidget(style, 1, alignment=Qt.AlignmentFlag.AlignTop)\n\n # Add a float entry for selecting the probability of performing encodings.\n probability, self.probability_entry = generate_float_widget(\n \"Probability:\",\n \"The probability that an encoding will take place, which must be a number\\n\"\n \"in the range 0 <= p <= 1. A probability of 0 means that no encodings will\\n\"\n \"occur, a probability of 0.5 means approximately half of the symbols will\\n\"\n \"be encoded, and 1.0 means all symbols are encoded. This allows you to achieve\\n\"\n \"a mixture of digraphs, trigraphs and regular symbols for maximal obfuscation.\",\n self.chance,\n 0.0,\n 1.0,\n parent,\n )\n layout.addWidget(probability, 1, alignment=Qt.AlignmentFlag.AlignTop)\n parent.setLayout(layout)\n\n def load_gui_values(self) -> None:\n \"\"\"Loads the user options from relevant UI elements into the actual digraph and\n trigraph encoding unit. In this case, we load the encoding style to be used,\n alongside the probability that an encoding should be performed.\"\"\"\n # Load the encoding style to used (selected via radio buttons)\n if self.style_buttons is not None and len(self.style_buttons) > 0:\n for button, style in self.style_buttons.items():\n if button.isChecked():\n self.style = style\n break\n # Load the encoding probability (0.0 <= p <= 1.0, default 0.75)\n if self.probability_entry is not None:\n try:\n self.chance = float(self.probability_entry.text())\n if self.chance > 1.0:\n self.chance = 1.0\n elif self.chance < 0.0:\n self.chance = 0.0\n except:\n self.chance = 0.75\n\n def from_json(json_str: str) -> \"GuiDiTriGraphEncodeUnit\":\n \"\"\"Loads the GUI obfuscation unit from its JSON string representation by\n calling the relevant unit-specific conversion method and parsing its attributes.\n\n Args:\n json_str (str): The JSON string representation of the GuiDiTriGraphEncodeUnit.\n\n Returns:\n GuiDiTriGraphEncodeUnit: The corresponding GuiDiTriGraphEncodeUnit object.\"\"\"\n unit = DiTriGraphEncodeUnit.from_json(json_str)\n if unit is None:\n return None\n return GuiDiTriGraphEncodeUnit(unit.style, unit.chance)\n\n def get_gui() -> \"GuiDiTriGraphEncodeUnit\":\n \"\"\"Creates a digraph/trigraph encoding transformation loaded with default values\n to allow GUI interaction with the transformation.\n\n Returns:\n GuiDiTriGraphEncodeUnit: the digraph/trigraph encoding transformation with\n default values.\"\"\"\n return GuiDiTriGraphEncodeUnit(DiTriGraphEncodeUnit.Style.MIXED, 0.75)\n","repo_name":"AlexJones0/obfusCate","sub_path":"app/obfuscation/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":69369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
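The GUI units in the record above all read user input the same way: parse the text field, clamp the value to its legal range, and fall back to a default when parsing fails. Below is a minimal, Qt-free sketch of that pattern; the function name and the 0.8 default are illustrative, not part of the original module.

def parse_probability(text: str, default: float = 0.8) -> float:
    # Parse a probability from free-form text, clamping it to [0.0, 1.0].
    try:
        p = float(text)
    except ValueError:
        return default
    return min(max(p, 0.0), 1.0)

assert parse_probability("0.75") == 0.75
assert parse_probability("1.7") == 1.0    # clamped to the upper bound
assert parse_probability("oops") == 0.8   # falls back to the default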
+{"seq_id":"3498507724","text":"from django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom property.models import Property, Room, PropertyEval\nfrom property.forms import RegisterForm\n# Create your views here.\n# Home page\ndef home(request):\n room_type = Room.objects.values('room_type').distinct()\n location = Property.objects.values('neighbourhood_group').distinct()\n\n search_query = request.GET.get('search', '')\n location_query = request.GET.getlist('location')\n room_type_query = request.GET.get('room_type', '')\n sort_by = request.GET.get('sort_by', '')\n\n property = Property.objects.all()\n\n if search_query:\n property = property.filter(name__icontains=search_query)\n\n if location_query:\n property = property.filter(neighbourhood_group__in=location_query)\n\n if room_type_query:\n property = property.filter(room__room_type=room_type_query)\n\n if sort_by:\n if sort_by == 'ASC':\n property = property.order_by('room__price')\n elif sort_by == 'DSC':\n property = property.order_by('-room__price')\n\n paginator = Paginator(property, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n context = {\n 'room_type': room_type,\n 'location': location,\n 'page_obj': page_obj,\n 'search': search_query,\n 'sort_by': sort_by,\n }\n\n return render(request, 'property/home.html', context)\n\n# Register Page\ndef register(request):\n form = RegisterForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n user.customer.first_name = form.cleaned_data.get('first_name')\n user.customer.last_name = form.cleaned_data.get('last_name')\n user.save()\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=password)\n login(request, user)\n messages.success(request, 'Registration successful. Welcome!')\n return redirect('home')\n return render(request, 'property/register.html', {'form': form})\n\n@login_required\ndef dashboard(request):\n user = request.user\n if user.is_authenticated & user.is_staff:\n property_data = PropertyEval.objects.all()\n labels = [property.name for property in property_data]\n prices = [property.price for property in property_data]\n reviews = [property.number_of_reviews for property in property_data]\n data = {\n 'labels': labels,\n 'prices': prices,\n 'reviews': reviews,\n }\n return render(request, 'property/dashboard.html',{'data': data})\n else:\n return redirect('login.html')\n\n@login_required\ndef logout_user(request):\n logout(request)\n return redirect('home')","repo_name":"t09Simi/PropertyBooking","sub_path":"property/views/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"29541527583","text":"from django.contrib.messages.api import error\nfrom django.db import models\nfrom django.db.models.deletion import CASCADE\nfrom ..users.models import User\n\n# Create your models here.\nclass BookManager(models.Manager):\n def book_validator(self, data):\n errors = []\n if len(data['title']) < 2:\n errors.append('Book title must be at least 2 characters')\n if len(data['author']) < 2:\n errors.append('Author name must be at lest 2 characters')\n if len(data['review']) < 2:\n errors.append('Review must be at least 2 characters')\n if not data['rating']:\n errors.append('Must give a rating')\n if data['rating'] and int(data['rating']) > 5:\n errors.append('Rating must be between 1 and 5')\n check_books = Book.objects.all()\n for book in check_books:\n if data['title'].title() == book.title and data['author'].title() == book.author:\n errors.append('Book has already been added')\n break\n return errors\n\n def review_validator(self,data):\n errors = []\n if len(data['review']) < 2:\n errors.append('Review must be at least 2 characters')\n if not data['rating']:\n errors.append('Must give a rating')\n if data['rating'] and int(data['rating']) > 5:\n errors.append('Rating must be between 1 and 5')\n return errors\n \n def add_book(self, data, user_id):\n user = User.objects.get(id = user_id)\n book = Book.objects.create(\n title = data['title'].title(),\n author = data['author'].title(),\n uploaded_by = user\n )\n return book.id\n\n def add_review(self, data, user_id, book_id):\n review_owner = User.objects.get(id = user_id)\n book_reviewing = Book.objects.get(id = book_id)\n review = Review.objects.create(\n review = data['review'],\n rating = data['rating'],\n user = review_owner,\n book = book_reviewing\n )\n return review.id\n\n\nclass Book(models.Model):\n title = models.CharField(max_length=255)\n author = models.TextField()\n uploaded_by = models.ForeignKey(User, related_name='uploaded_books', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n # users_who_liked = models.ManyToManyField(User, related_name='liked_books')\n objects = BookManager()\n\n\nclass Review(models.Model):\n review = models.TextField()\n rating = models.IntegerField()\n user = models.ForeignKey(User, related_name='book_reviews', on_delete= CASCADE)\n book = models.ForeignKey(Book, related_name='reviews', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n objects = BookManager()","repo_name":"AbomiDaba/BookWorm","sub_path":"apps/books/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31690568582","text":"import re\n\nLINE_REGEX = re.compile('(\\w+)\\ \\{\\((\\d+),\\ (\\d+)\\), \\[(.*)\\], \\{(.*)\\}\\}')\n\ndef parse_line(line):\n groups = LINE_REGEX.search(line)\n name = groups.group(1)\n x = int(groups.group(2))\n y = int(groups.group(3))\n interests = groups.group(4).split(',')\n attrs = groups.group(5).strip(', \\n')\n attributes = {}\n if attrs:\n attrs = attrs.split(',')\n for attr in attrs:\n attr = attr.split(':')\n attributes[attr[0].strip()] = attr[1].strip()\n return name, x, y, interests, attributes\n\n\ndef similarity(x_i, y_i):\n set_x = set(x_i)\n set_y = set(y_i)\n return len(set_x & set_y) / float(len(set_x | set_y))\n\n\ndef match_users(all_users, user, distance, preferences):\n cell = user.location.cell()\n users = []\n if distance == 'VERY_NEAR':\n users = all_users.get(cell, [])\n elif distance == 'NEAR':\n for cell in user.location.adjacent_cells():\n users += all_users.get(cell, [])\n else:\n for cell in user.location.away_cells():\n users += all_users.get(cell, [])\n\n if user in users:\n users.remove(user)\n\n sim = {}\n matching_users = []\n for u in users:\n if u.match_preferences(preferences):\n sim[u] = similarity(u.interests, user.interests)\n matching_users.append(u)\n\n matching_users.sort(key=lambda x: sim[x], reverse=True)\n return matching_users, sim\n","repo_name":"harshulj/morbi","sub_path":"morbi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25540144516","text":"# Importando os módulos necessários\r\nfrom time import sleep\r\nfrom os import system\r\nfrom funcoes import cursor, conn, atualiza_estoque_venda, verifica_estoque, calcula_total_unitario\r\nfrom prettytable import PrettyTable\r\n\r\n\r\ndef vendas():\r\n while True:\r\n # Exibe o menu de vendas\r\n system('cls')\r\n colunasvendas = ['ID', 'ID DO CLIENTE', 'DATA', 'TOTAL']\r\n colunasvendaplantas = ['ID DA VENDA', 'ID DA PLANTA', 'QUANTIDADE', 'TOTAL POR PLANTA']\r\n system('cls')\r\n sleep(0.5)\r\n print('----------------------MENU VENDAS--------------------')\r\n # Pede para o usuário inserir a opção desejada\r\n print('1 - Pesquisar vendas realizadas\\n2 - Cadastrar vendas\\n3 - Excluir venda\\n0 - Voltar')\r\n escolha = int(input('Escolha a opção: '))\r\n system('cls')\r\n # Se a escolha for 1 irá exibir opções de pesquisa na tabela vendas\r\n if escolha == 1:\r\n while True:\r\n system('cls')\r\n print('1 - Pesquisar todas vendas\\n2 - Pesquisar detalhes de uma venda\\n0 - Voltar')\r\n escolha = int(input('Escolha a opção: '))\r\n system('cls')\r\n # Se a escolha for 1 irá exibir todas as vendas ordenadas pela data\r\n if escolha == 1:\r\n cursor.execute(\"SELECT * FROM vendas order by data\")\r\n resultados = cursor.fetchall()\r\n tabela = PrettyTable(colunasvendas)\r\n for registro in resultados:\r\n tabela.add_row(registro)\r\n print(tabela)\r\n input('Pressione ENTER para continuar')\r\n # Se a escolha for 2 exibe os detalhes de uma venda escolhida pelo ID\r\n elif escolha == 2:\r\n id_venda = int(input('Insira o ID da venda: '))\r\n cursor.execute('SELECT * FROM vendas_plantas WHERE id_venda = %s', (id_venda,))\r\n resultados = cursor.fetchall()\r\n tabela = PrettyTable(colunasvendaplantas)\r\n for registro in resultados:\r\n tabela.add_row(registro)\r\n print(tabela)\r\n input('Pressione ENTER para continuar')\r\n elif escolha == 0:\r\n break\r\n else:\r\n sleep(1)\r\n print('\\nOpção Inválida!\\n')\r\n sleep(1)\r\n # Opção para cadastrar uma venda\r\n elif escolha == 2:\r\n valor_total = 0 # Define o valor total como 0 para depois ir somando\r\n # pede os dados para cadastro da venda\r\n cliente_id = int(input('Insira o ID do cliente: '))\r\n quant_plantas = int(input('Quantidade de plantas: '))\r\n data = input('Data: ')\r\n # insere na tabela vendas os dados e retorna o id gerado\r\n cursor.execute('INSERT INTO vendas (id_cliente, data)'\r\n 'VALUES (%s, %s)'\r\n 'RETURNING id',\r\n (cliente_id, data))\r\n conn.commit()\r\n id_venda = cursor.fetchone()[0]\r\n # laço para cadastro da(s) planta(s) vendida(s)\r\n for i in range(quant_plantas):\r\n id_planta = int(input('Id da planta: '))\r\n quant_planta = int(input('Quantidade da planta: '))\r\n verifica_estoque(id_planta, quant_planta, id_venda) # verifica se a quantidade inserida esta\r\n # disponível em estoque, se não estiver irá cancelar toda a venda e apagar os dados já inseridos\r\n total_unitario = calcula_total_unitario(id_planta, quant_planta) # calcula o valor unitário\r\n # insere os dados na tabela vendas_plantas\r\n cursor.execute('INSERT INTO vendas_plantas (id_venda, id_planta, quantidade, total_unitario)'\r\n 'VALUES (%s, %s, %s, %s); ', (id_venda, id_planta, quant_planta, total_unitario))\r\n conn.commit()\r\n atualiza_estoque_venda(id_planta, quant_planta) # atualiza o estoque\r\n valor_total += total_unitario # calcula o valor total\r\n # insere o valor total na tabela vendas\r\n cursor.execute('UPDATE vendas SET valor_total = %s WHERE id = %s', (valor_total, id_venda))\r\n 
conn.commit()\r\n print('Venda cadastrada!')\r\n input('Pressione ENTER para continuar!')\r\n # Opção para excluir uma venda\r\n elif escolha == 3:\r\n id_venda = int(input('Insira o ID da venda que deseja excluir: ')) # pede o id para exclusão\r\n cursor.execute('SELECT * FROM vendas WHERE id = %s', (id_venda,))\r\n resultados = cursor.fetchall()\r\n tabela = PrettyTable(colunasvendas)\r\n for registro in resultados:\r\n tabela.add_row(registro)\r\n print('Tem certeza que deseja excluir os seguintes dados:')\r\n print(tabela)\r\n escolha = input('S ou N: ') # mostra os dados da venda e pede confirmação\r\n if escolha.lower() == 's':\r\n cursor.execute('DELETE FROM vendas_plantas WHERE id_venda = %s', (id_venda,))\r\n conn.commit() # primeiro deleta da tabela vendas_plantas\r\n cursor.execute('DELETE FROM vendas WHERE id = %s', (id_venda,))\r\n conn.commit() # Agora deleta da tabela vendas\r\n system('cls')\r\n print('Deletado com Sucesso!')\r\n input('Pressione ENTER para continuar')\r\n elif escolha.lower() == 'n':\r\n break\r\n else:\r\n sleep(1)\r\n print('\\nOpção Inválida!\\n')\r\n sleep(2)\r\n elif escolha == 0:\r\n break\r\n else:\r\n sleep(1)\r\n print('\\nOpção Inválida!\\n')\r\n sleep(1)\r\n","repo_name":"AlanLavall/pysql","sub_path":"vendas.py","file_name":"vendas.py","file_ext":"py","file_size_in_byte":5988,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
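The sale registration in the record above uses a parameterized INSERT ... RETURNING id so the generated sale id comes back in the same round trip. A minimal sketch of that pattern, assuming an already-open psycopg2-style connection is passed in; the function name is illustrative.

def registrar_venda(conn, id_cliente, data):
    # Insert one sale and return the id generated by the database.
    with conn.cursor() as cursor:
        cursor.execute(
            "INSERT INTO vendas (id_cliente, data) VALUES (%s, %s) RETURNING id",
            (id_cliente, data),
        )
        id_venda = cursor.fetchone()[0]
    conn.commit()
    return id_venda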
+{"seq_id":"28155141833","text":"from tkinter import*\nimport sqlite3\nimport tkinter.ttk as ttk\nimport tkinter.messagebox as tkMessageBox\nfrom datetime import datetime, date, time, timedelta\nimport time\nimport datetime\n\nroot = Tk()\n#==================RELOJ DIGITAL=============================================================\nroot.focus()\nroot.config(cursor='watch')\n\nroot.title(\"SISTEMA LAVANDERIA\")\nscreen_width = root.winfo_screenwidth()\nscreen_height = root.winfo_screenheight()\n\nroot.attributes(\"-fullscreen\", False)\nahora = time.strftime(\"%c\")\n\n#==================FechaACTUAL===============#\nx = datetime.datetime.now()\nfecha = (\"%s/%s/%s\" % (x.day, x.month, x.year))\n#==================FechaACTUAL===============#\n\n#==================================LISTBOX============================================\n\ndef on_keyrelease(event):\n # get text from entry\n value = event.widget.get()\n value = value.strip().lower()\n\n # get data from test_list\n if value == '':\n data = test_list\n else:\n data = []\n for item in test_list:\n if value in item.lower():\n data.append(item)\n\n # update data in listbox\n listbox_update(data)\n\ndef listbox_update(data):\n # delete previous data\n listbox.delete(0, 'end')\n\n # sorting data\n data = sorted(data, key=str.lower)\n\n # put new data\n for item in data:\n listbox.insert('end', item)\n\n\ndef on_select(event):\n # display element selected on list\n print('(event) previous:', event.widget.get('active'))\n print('(event) current:', event.widget.get(event.widget.curselection()))\n print('---')\n\ntest_list = ('saco', 'terno', 'blusa', 'vestido', 'mochila', 'sinnombre', 'nose', 'algo' )\n\n#=====================================================================================\ndef Database():\n global conn, cursor\n conn = sqlite3.connect('lavanderia.db')\n cursor = conn.cursor()\n cursor.execute(\"CREATE TABLE IF NOT EXISTS `cliente` (mem_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, nombre TEXT, apellido TEXT, estado TEXT, descripcion TEXT, precio TEXT, fecha TEXT)\")\n\ndef Create():\n if NOMBRE.get() == \"\" or APELLIDO.get() == \"\" or ESTADO.get() == \"\" or DESCRIPCION.get() == \"\" or PRECIO.get() == \"\" or FECHA.get() == \"\":\n txt_result.config(text=\"Por favor complete el campo requerido!\", fg=\"red\")\n else:\n Database()\n cursor.execute(\"INSERT INTO `cliente` (nombre, apellido, estado, descripcion, precio, fecha) VALUES(?, ?, ?, ?, ?, ?)\", (str(NOMBRE.get()), str(APELLIDO.get()), str(ESTADO.get()), str(DESCRIPCION.get()), str(PRECIO.get()), str(FECHA.get())))\n tree.delete(*tree.get_children())\n cursor.execute(\"SELECT * FROM `cliente` ORDER BY `apellido` ASC\")\n fetch = cursor.fetchall()\n for data in fetch:\n tree.insert('', 'end', values=(data[0], data[1], data[2], data[3], data[4], data[5], data[6]))\n conn.commit()\n NOMBRE.set(\"\")\n APELLIDO.set(\"\")\n ESTADO.set(\"\")\n DESCRIPCION.set(\"\")\n PRECIO.set(\"\")\n FECHA.set(\"\")\n cursor.close()\n conn.close()\n txt_result.config(text=\"Instancia creada\", fg=\"green\")\n\ndef Read():\n tree.delete(*tree.get_children())\n Database()\n cursor.execute(\"SELECT * FROM `cliente` ORDER BY `apellido` ASC\")\n fetch = cursor.fetchall()\n for data in fetch:\n tree.insert('', 'end', values=(data[0], data[1], data[2], data[3], data[4], data[5], data[6]))\n cursor.close()\n conn.close()\n txt_result.config(text=\"Datos mostrados correctamente\", fg=\"black\")\n\ndef Search():\n tree.delete(*tree.get_children())\n Database()\n nose = (str(NOMBRE.get()), 
str(APELLIDO.get()),)\n cursor.execute(\"SELECT * FROM `cliente` WHERE nombre = ? AND apellido = ?\", nose)\n fetch = cursor.fetchall()\n for data in fetch:\n tree.insert('', 'end', values=(data[0], data[1], data[2], data[3], data[4], data[5], data[6]))\n cursor.close()\n conn.close()\n txt_result.config(text=\"Datos mostrados correctamente\", fg=\"black\")\n\ndef Update():\n Database()\n if ESTADO.get() == \"\":\n txt_result.config(text=\"Por favor seleccione un genero\", fg=\"red\")\n else:\n tree.delete(*tree.get_children())\n cursor.execute(\"UPDATE `cliente` SET `nombre` = ?, `apellido` = ?, `estado` =?, `descripcion` = ?, `precio` = ?, `fecha` = ? WHERE `mem_id` = ?\", (str(NOMBRE.get()), str(APELLIDO.get()), str(ESTADO.get()), str(DESCRIPCION.get()), str(PRECIO.get()), str(FECHA.get()), int(mem_id)))\n conn.commit()\n cursor.execute(\"SELECT * FROM `cliente` ORDER BY `apellido` ASC\")\n fetch = cursor.fetchall()\n for data in fetch:\n tree.insert('', 'end', values=(data[0], data[1], data[2], data[3], data[4], data[5], data[6]))\n cursor.close()\n conn.close()\n NOMBRE.set(\"\")\n APELLIDO.set(\"\")\n ESTADO.set(\"\")\n DESCRIPCION.set(\"\")\n PRECIO.set(\"\")\n FECHA.set(\"\")\n btn_create.config(state=NORMAL)\n btn_read.config(state=NORMAL)\n btn_update.config(state=DISABLED)\n btn_delete.config(state=NORMAL)\n txt_result.config(text=\"Datos actualizados con exito\", fg=\"black\")\n\ndef OnSelected(event):\n global mem_id;\n curItem = tree.focus()\n contents =(tree.item(curItem))\n selecteditem = contents['values']\n mem_id = selecteditem[0]\n NOMBRE.set(\"\")\n APELLIDO.set(\"\")\n ESTADO.set(\"\")\n DESCRIPCION.set(\"\")\n PRECIO.set(\"\")\n FECHA.set(\"\")\n NOMBRE.set(selecteditem[1])\n APELLIDO.set(selecteditem[2])\n DESCRIPCION.set(selecteditem[4])\n PRECIO.set(selecteditem[5])\n FECHA.set(selecteditem[6])\n btn_create.config(state=DISABLED)\n btn_read.config(state=DISABLED)\n btn_update.config(state=NORMAL)\n btn_delete.config(state=DISABLED)\n\ndef Delete():\n if not tree.selection():\n txt_result.config(text=\"Por favor, seleccione un artículo primero\", fg=\"red\")\n else:\n result = tkMessageBox.askquestion('SISTEMA LAVANDERIA', '¿Seguro que quieres borrar este registro?', icon=\"warning\")\n if result == 'yes':\n curItem = tree.focus()\n contents =(tree.item(curItem))\n selecteditem = contents['values']\n tree.delete(curItem)\n Database()\n cursor.execute(\"DELETE FROM `cliente` WHERE `mem_id` = %d\" % selecteditem[0])\n conn.commit()\n cursor.close()\n conn.close()\n txt_result.config(text=\"Se borraron los datos\", fg=\"black\")\n\ndef Exit():\n result = tkMessageBox.askquestion('SISTEMA LAVANDERIA', '¿Seguro que quieres salir?', icon=\"warning\")\n if result == 'yes':\n root.destroy()\n exit()\n\n\n\n\n#==================================VARIABLES==========================================\nNOMBRE = StringVar()\nAPELLIDO = StringVar()\nESTADO = StringVar()\nDESCRIPCION = StringVar()\nPRECIO = StringVar()\nFECHA = StringVar()\n\n#==================================FRAME==============================================\nTop = Frame(root, width=900, height=50, bd=8, relief=\"raise\")\nTop.pack(side=TOP)\nLeft = Frame(root, width=600, height=500, bd=8, relief=\"raise\")\nLeft.pack(side=LEFT)\nRight = Frame(root, width=600, height=500, bd=8, relief=\"raise\")\nRight.pack(side=RIGHT)\nForms = Frame(Left, width=300, height=450)\nForms.pack(side=TOP)\nButtons = Frame(Left, width=300, height=100, bd=8, relief=\"raise\")\nButtons.pack(side=BOTTOM)\nRadioGroup = Frame(Forms)\nAlAgua 
= Radiobutton(RadioGroup, text=\"En inventario\", variable=ESTADO, value=\"En inventario\", font=('arial', 16)).pack(side=LEFT)\nAlSeco = Radiobutton(RadioGroup, text=\"Entregado\", variable=ESTADO, value=\"Entregado\", font=('arial', 16)).pack(side=LEFT)\n\n\n\n#====================EmpaquetandoENTRY===================#\nentry = Entry(Left)\nentry.pack()\n#====================EmpaquetandoENTRY===================#\n\n#====================EmpaquetandoLISTBOX===================#\nlistbox = Listbox(Left)\nlistbox.pack()\n#====================EmpaquetandoLISTBOX===================#\n\n#====================EmpaquetandoFECHA===================#\nfecha = Label(Top, font=('ubuntu', 30, 'bold'), bg='#3C3B37', fg='white', bd=0, text = fecha)\nfecha.pack(side = LEFT)\n#====================EmpaquetandoRELOJ===================#\n\n#====================EmpaquetandoFECHA===================#\ntime1 = ''\nclock = Label(Top, font=('ubuntu', 30, 'bold'), bg='#3C3B37', fg='white', bd=0)\nclock.pack(side = RIGHT)\n\n\n\ndef tick():\n global time1\n time2 = time.strftime('%H:%M:%S')\n if time2 != time1:\n time1 = time2\n clock.config(text=time2)\n clock.after(200, tick)\ntick()\n#====================EmpaquetandoRELOJ===================#\n\n\n#==================================LABEL WIDGET=======================================\ntxt_title = Label(Top, width=900, font=('arial', 24), text = \"Sistema Lavanderia\")\ntxt_title.pack()\ntxt_nombre = Label(Forms, text=\"Nombre:\", font=('arial', 16), bd=15)\ntxt_nombre.grid(row=0, sticky=\"e\")\ntxt_apellido = Label(Forms, text=\"Apellido:\", font=('arial', 16), bd=15)\ntxt_apellido.grid(row=1, sticky=\"e\")\ntxt_estado = Label(Forms, text=\"Estado:\", font=('arial', 16), bd=15)\ntxt_estado.grid(row=2, sticky=\"e\")\ntxt_descripcion = Label(Forms, text=\"Descripcion:\", font=('arial', 16), bd=15)\ntxt_descripcion.grid(row=3, sticky=\"e\")\ntxt_precio = Label(Forms, text=\"Precio:\", font=('arial', 16), bd=15)\ntxt_precio.grid(row=4, sticky=\"e\")\ntxt_fecha = Label(Forms, text=\"Fecha:\", font=('arial', 16), bd=15)\ntxt_fecha.grid(row=5, sticky=\"e\")\ntxt_result = Label(Buttons)\ntxt_result.pack(side=TOP)\n\n#==================================ENTRY WIDGET=======================================\nnombre = Entry(Forms, textvariable=NOMBRE, width=30)\nnombre.grid(row=0, column=1)\napellido = Entry(Forms, textvariable=APELLIDO, width=30)\napellido.grid(row=1, column=1)\nRadioGroup.grid(row=2, column=1)\ndescripcion = Entry(Forms, textvariable=DESCRIPCION, width=30)\ndescripcion.grid(row=3, column=1)\nprecio = Entry(Forms, textvariable=PRECIO, width=30)\nprecio.grid(row=4, column=1)\nfecha = Entry(Forms, textvariable=FECHA, width=30)\nfecha.grid(row=5, column=1)\n\nEntry = entry.bind('', on_keyrelease)\nListbox = listbox.bind('<>', on_select)\nlistbox_update(test_list)\n\n#==================================BUTTONS WIDGET=====================================\nbtn_create = Button(Buttons, width=10, text=\"Crear\", command=Create)\nbtn_create.pack(side=LEFT)\nbtn_read = Button(Buttons, width=10, text=\"Leer\", command=Read )\nbtn_read.pack(side=LEFT)\nbtn_update = Button(Buttons, width=10, text=\"Actualizar\", command=Update, state=DISABLED)\nbtn_update.pack(side=LEFT)\nbtn_delete = Button(Buttons, width=10, text=\"Borrar\", command=Delete)\nbtn_delete.pack(side=LEFT)\nbtn_exit = Button(Buttons, width=10, text=\"Salir\", command=Exit)\nbtn_exit.pack(side=LEFT)\nbtn_buscar = Button(Buttons, width=10, text=\"Buscar\", 
command=Search)\nbtn_buscar.pack(side=LEFT)\n\n#==================================LIST WIDGET========================================\nscrollbary = Scrollbar(Right, orient=VERTICAL)\nscrollbarx = Scrollbar(Right, orient=HORIZONTAL)\ntree = ttk.Treeview(Right, columns=(\"ClienteID\", \"Nombre\", \"Apellido\", \"Estado\", \"Descripcion\", \"Precio\", \"Fecha\"), selectmode=\"extended\", height=500, yscrollcommand=scrollbary.set, xscrollcommand=scrollbarx.set)\nscrollbary.config(command=tree.yview)\nscrollbary.pack(side=RIGHT, fill=Y)\nscrollbarx.config(command=tree.xview)\nscrollbarx.pack(side=BOTTOM, fill=X)\ntree.heading('ClienteID', text=\"ClienteID\", anchor=W)\ntree.heading('Nombre', text=\"Nombre\", anchor=W)\ntree.heading('Apellido', text=\"Apellido\", anchor=W)\ntree.heading('Estado', text=\"Estado\", anchor=W)\ntree.heading('Descripcion', text=\"Descripcion\", anchor=W)\ntree.heading('Precio', text=\"Precio\", anchor=W)\ntree.heading('Fecha', text=\"Fecha\", anchor=W)\ntree.column('#0', stretch=NO, minwidth=0, width=0)\ntree.column('#1', stretch=NO, minwidth=0, width=0)\ntree.column('#2', stretch=NO, minwidth=0, width=80)\ntree.column('#3', stretch=NO, minwidth=0, width=120)\ntree.column('#4', stretch=NO, minwidth=0, width=80)\ntree.column('#5', stretch=NO, minwidth=0, width=150)\ntree.column('#6', stretch=NO, minwidth=0, width=120)\ntree.column('#7', stretch=NO, minwidth=0, width=120)\ntree.pack()\ntree.bind('', OnSelected)\n\n#==================================INITIALIZATION=====================================\nif __name__ == '__main__':\n root.mainloop()\n","repo_name":"AlexanderPrincipe/Sistema_Python_Tkinter","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":12413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
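Most queries in the record above bind user values with ? placeholders; the DELETE interpolates the id directly with %d. A short, self-contained illustration of the fully parameterized style against an in-memory SQLite database:

import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("CREATE TABLE cliente (mem_id INTEGER PRIMARY KEY, nombre TEXT, precio TEXT)")
cursor.execute("INSERT INTO cliente (nombre, precio) VALUES (?, ?)", ("Ana", "25"))
conn.commit()
cursor.execute("SELECT * FROM cliente WHERE nombre = ?", ("Ana",))
print(cursor.fetchall())  # [(1, 'Ana', '25')]
conn.close()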
+{"seq_id":"21342568989","text":"\"\"\"\n 连接主要运行工具,geth\n\"\"\"\n\nimport subprocess\nimport os\nimport psutil\n\nclass AccessMainWorkTool:\n\n def __init__(self, config, ethash_dagdir, log_path=\"cache\"):\n \"\"\"\n\n :param datadir: 和Init的路劲相同 否则chainId 会 出错\n :param ethash_dagdir:\n :param networkid:\n :param wsapi:\n \"\"\"\n\n self.ethash_dagdir = ethash_dagdir\n self.log = log_path\n self.config = config\n self.process = None\n\n def start(self):\n command = \"hammer --config {} --ethash.dagdir {}\"\\\n .format(self.config, self.ethash_dagdir)\n\n self.process = subprocess.Popen(command, shell=True, close_fds=True)\n p = psutil.Process(self.process.pid)\n\n\n def stop(self):\n os.popen(\"taskkill /im hammer.exe /F /T\")\n\n\n def stop_2(self):\n # 根据进程名杀死\n for p in psutil.pids():\n t_p = psutil.Process(p)\n if t_p.name() == \"hammer.exe\":\n\n os.kill(p,9)\n\n\n","repo_name":"XiaoXiaoDaoShi/HappyWater","sub_path":"Client/AccessMainWorkTool.py","file_name":"AccessMainWorkTool.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"2441192529","text":"# -*- coding: utf-8 -*-\n\nimport numpy\nimport itertools\nimport scipy.interpolate\nimport scipy.signal\n\nimport numpy\nimport itertools\nimport scipy.interpolate\nimport scipy.signal\nimport scipy.optimize\n\n__author__ = 'Kryosugarra'\n\n\n#\n# Peak find\n#\ndef get1DPeak(x, y, *, inverse=False, interpolate=True, indicies=False, use='mass'):\n assert len(x) == len(y)\n if inverse:\n maxY = numpy.max(y)\n y = numpy.multiply(y, -1)\n y = numpy.add(y, maxY)\n peakPos = -1\n if use == 'mass':\n peakPos = numpy.divide(numpy.sum(numpy.multiply(x, y)), numpy.sum(y))\n elif use == 'mode':\n peakPos = x[numpy.where(y == numpy.max(y))[0][0]]\n elif use == 'median':\n mode = numpy.median(y)\n diffs = [numpy.abs(i - mode) for i in y]\n peakPos = x[numpy.where(diffs == numpy.min(diffs))[0][0]]\n elif use == 'average':\n avg = numpy.average(y)\n diffs = [numpy.abs(i - avg) for i in y]\n peakPos = x[numpy.where(diffs == numpy.min(diffs))[0][0]]\n elif use == 'max':\n max = numpy.max(y)\n diffs = [numpy.abs(i - max) for i in y]\n peakPos = x[numpy.where(diffs == numpy.min(diffs))[0][0]]\n elif use == 'FWHW':\n max = numpy.max(y)\n #center = y.index(max)\n center = numpy.where(y==max)[0][0]\n diffs = [numpy.abs(i - max/2.) for i in y]\n\n diffs_r = diffs[center:]\n diffs_l = diffs[:center]\n firstIntersection = diffs_l.index(numpy.min(numpy.abs(diffs_l)))\n\n secondIntersectoin = center + diffs_r.index(numpy.min(numpy.abs(diffs_r)))\n\n peakPos = x[firstIntersection] /2. + x[secondIntersectoin]/2.\n else:\n raise Exception('Bad peak find mechanism')\n\n if indicies:\n diffs = [i - peakPos for i in x]\n peakIndex = diffs.index(numpy.min(numpy.abs(diffs)))\n peakPos = peakIndex\n peakVal = y[peakIndex]\n elif interpolate:\n f = scipy.interpolate.interp1d(x, y)\n peakVal = f(peakPos)\n else:\n diffs = [numpy.abs(i - peakPos) for i in x]\n #peakIndex = diffs.index(numpy.min(numpy.abs(diffs)))\n peakIndex = diffs.index(numpy.min(diffs))\n peakPos = x[peakIndex]\n peakVal = y[peakIndex]\n\n return peakPos, peakVal\n\n\ndef getPeakCurvatureRadius(x, y, peakMin = -1, peakMax = -1):\n print('FIXME')\n f = scipy.interpolate.interp1d(x, y, 'cubic')\n m = 0\n peakX = -1\n for x_i, y_i in itertools.zip_longest(x, y):\n if y_i >= m:\n m = y_i\n peakX = x_i\n\n #peakX = get1DPeak(x, y)\n\n #deriv1\n dx = numpy.diff(x)\n y1 = numpy.divide(numpy.diff(f(x)), dx)\n x1 = numpy.add(numpy.divide(numpy.diff(x), 2), x[0:-1])\n f1 = scipy.interpolate.interp1d(x1, y1, 'cubic')\n\n #deriv2\n dx = numpy.diff(x1)\n y2 = numpy.divide(numpy.diff(f1(x1)), dx)\n x2 = numpy.add(numpy.divide(numpy.diff(x1), 2), x1[0:-1])\n f2 = scipy.interpolate.interp1d(x2, y2, 'cubic')\n\n R = numpy.abs(numpy.divide(numpy.power(numpy.add(1, numpy.power(f1(x2), 2)), 3.0/2.0), f2(x2)))\n Rint = scipy.interpolate.interp1d(x2, R, 'cubic')\n\n #plt.plot(x, f(x))\n #plt.plot(x1, f1(x1))\n #plt.plot(x2, f2(x2))\n #plt.plot(x2, Rint(x2))\n\n numR = -1\n if peakMax != -1 and peakMin != -1:\n tehR, n = 0, 0\n for x, y in itertools.zip_longest(x2, R):\n if 1100 <= x <= 1125:\n #if 450 <= x <= 500:\n n += 1\n tehR += y\n numR = tehR/n\n\n return Rint(peakX), peakX, numR\n\n\n#\n# Function fitting\n#\ndef gaussian(x,a,x0,sigma):\n return a*numpy.exp(-(x-x0)**2/(2*sigma**2))\n\n\ndef fit_gaussian(s, bounds=None, method=\"lm\"):\n pos, int = get1DPeak(s.wavelengths, s.data, interpolate=True, indicies=False, use=\"mass\")\n sigma = numpy.sqrt(numpy.sum(numpy.multiply(s.data, numpy.subtract(s.wavelengths, pos)**2))/len(s.data))\n\n if 
bounds:\n popt, pcov = scipy.optimize.curve_fit(gaussian, s.wavelengths, s.data,\n p0=[int, pos, sigma], method=method,\n bounds=bounds)\n else:\n popt, pcov = scipy.optimize.curve_fit(gaussian, s.wavelengths, s.data,\n p0=[int, pos, sigma], method=method)\n return popt","repo_name":"kitchenknif/pyopticsutil","sub_path":"utilities/geometry_1d.py","file_name":"geometry_1d.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
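fit_gaussian in the record above seeds curve_fit with the peak position and a moment-based sigma. A self-contained check of the same Gaussian model on synthetic data, so it runs without a spectrum object:

import numpy
import scipy.optimize

def gaussian(x, a, x0, sigma):
    return a * numpy.exp(-(x - x0) ** 2 / (2 * sigma ** 2))

rng = numpy.random.default_rng(0)
x = numpy.linspace(-5.0, 5.0, 200)
y = gaussian(x, 2.0, 1.0, 0.7) + rng.normal(0.0, 0.05, x.size)
popt, pcov = scipy.optimize.curve_fit(gaussian, x, y, p0=[1.0, 0.0, 1.0])
print(popt)  # roughly [2.0, 1.0, 0.7]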
+{"seq_id":"15388856486","text":"from pygame.locals import *\nimport pygame\n\nscale = 100\niterations = 50\npygame.init()\n\nsize = width, height = 400, 400\nscreen = pygame.display.set_mode(size)\nxaxis = width / 2\nyaxis = height / 2\nscreen.fill((255, 255, 255))\n\n\ndef mandelbrot(zoom):\n for iy in range(height):\n for ix in range(width):\n\n z = 0 + 0j\n c = complex(float(ix - xaxis) / (scale * zoom), float(iy - yaxis) / (scale * zoom))\n\n for i in range(iterations):\n z = z ** 2 + c\n if abs(z) > 2:\n v = (1000 * i) / iterations\n if v > 510:\n color = (255, 255, v % 255)\n elif v > 255:\n color = (255, v % 255, 0)\n else:\n color = (v % 255, 0, 0)\n break\n else:\n color = (0, 0, 0)\n\n screen.set_at((ix, iy), color)\n\n\ndef julia(zoom):\n for iy in range(int(height)):\n for ix in range(int(width)):\n\n c = complex(-0.8, 0.156)\n z = complex(float(ix - xaxis) / (scale * zoom), float(iy - yaxis) / (scale * zoom))\n\n for i in range(iterations):\n z = z ** 2 + c\n if abs(z) > 2:\n v = (1000 * i) / iterations\n if v > 510:\n color = (255, 255, v % 255)\n elif v > 255:\n color = (255, v % 255, 0)\n else:\n color = (v % 255, 0, 0)\n break\n else:\n color = (0, 0, 0)\n\n screen.set_at((ix, iy), color)\n\n\ndef main():\n zoom = 1\n j = True\n julia(zoom)\n pygame.display.update()\n\n while True:\n global xaxis, yaxis, height, width, scale\n event = pygame.event.poll()\n\n if (event.type == QUIT or\n (event.type == KEYDOWN and event.key == K_ESCAPE)):\n break\n if event.type == KEYDOWN and event.key == K_j:\n j = True\n julia(zoom)\n pygame.display.update()\n if event.type == KEYDOWN and event.key == K_m:\n j = False\n mandelbrot(zoom)\n pygame.display.update()\n if event.type == KEYDOWN and event.key == K_1:\n zoom *= 1.2\n if event.type == KEYDOWN and event.key == K_2:\n zoom *= 0.8\n if event.type == pygame.MOUSEBUTTONUP:\n oldzoom = zoom\n pos = pygame.mouse.get_pos()\n posx = pos[0]\n posy = pos[1]\n # translates function so that the point where the user clicked\n # is in the center of the viewport\n xaxis = (xaxis + (posx - width / 2) / 5)\n yaxis = (yaxis + (posy - height / 2) / 5)\n\n xaxis = (zoom * (posx / width + xaxis) / oldzoom - posx / width)\n yaxis = (zoom * (posy / height + yaxis) / oldzoom - posy / height)\n\n if j:\n julia(zoom)\n pygame.display.update()\n else:\n mandelbrot(zoom)\n pygame.display.update()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"theExplodeGuy/Mandelbrot-Julia-Set","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9267799137","text":"import pandas as pd\n\nfrom src.library.backtest.process import derive_table_name\nfrom src.library.dynamo.Table import Table\nfrom src.library.helpers.dates import get_dates\nfrom src.library.helpers.general import bring_columns_first\nfrom src.library.osmv.Osmv import Osmv\nfrom src.library.params import param\nfrom src.library.params import timeserie as ts_param\n\n\n#(list_of_dates, list_obj_dates) = get_dates()\nfrom src.library.params.param import BUCKET_NAME\n\nlist_obj_dates = []\nlist_of_dates = []\nfor key in BUCKET_NAME.list():\n list_of_dates.append((key.name.encode('utf-8')[-8:], \"%Y%m%d\").date())\n list_obj_dates.append(key.name.encode('utf-8')[-8:])\n\ngreek_ts_cols = ts_param.GREEK_TS_COLS\ncost_cols = ts_param.COST_COLS\nkeep_cols = ts_param.KEEP_COLS\n\nosmv = Osmv(param.IS_LOCAL, param.BUCKET_NAME)\n(dbr, dbc, s3r, s3c, bucket, db_dict) = osmv.select_env(param.ENV_USED)\n\n\n# def common_df_dates(dict_ticker_df):\n# list_dates = []\n# for ticker, df in dict_ticker_df.items():\n# list_dates.append(list(df.trade_date))\n# if len(list_dates) > 0:\n# return sorted(list(set(list_dates[0]).intersection(*list_dates)))\n# else:\n# return list_dates[0]\n#\n#\n# def align_df_dates(dict_ticker_df, columns_to_align):\n# dates = common_df_dates(dict_ticker_df)\n# for ticker, df in dict_ticker_df.items():\n# dict_ticker_df[ticker] = align_df(df, columns_to_align, dates)\n#\n# return dict_ticker_df, dates\n#\n#\n# def align_df(df, columns_to_align, com_dates):\n# dates_dict = {}\n# idx_common_dates = {}\n#\n# for d in com_dates:\n# idx_common_dates[d] = list(df.trade_date).index(d)\n#\n# dates_dict[com_dates[0]] = [df.trade_date.iloc[0]]\n# for i, d in enumerate(com_dates[1:]):\n# dates_dict[d] = list(df.trade_date.iloc[(idx_common_dates[com_dates[i]] + 1):(idx_common_dates[d] + 1)].values)\n#\n# output_df = df.copy()\n# output_df = output_df[output_df.trade_date.isin(com_dates)]\n#\n# for col in columns_to_align:\n# output_df[col] = output_df.trade_date.apply(lambda d: df[df.trade_date.isin(dates_dict[d])][col].sum())\n# return output_df\n\n\ndef load_ticker_df(table_name, ticker, start, end):\n items = Table(dbr.Table(table_name)).query_range_between(\"ticker\", ticker, \"trade_date\", start, end)\n dates = [i[\"trade_date\"] for i in items]\n if start in dates:\n return pd.DataFrame(items)\n else:\n return pd.DataFrame([])\n\n\ndef greek_neutral_ts(greek, strike, start, end, maturity_string, tickers_long, tickers_short, export_dfs=False):\n\n table_name = derive_table_name(strike, maturity_string)\n ts_dates = [d for d in list_of_dates if (start <= d <= end)]\n\n # longs\n (long_df, long_start_risk_dict, long_end_risk_dict, long_exports) = greek_weighted_ts(greek, tickers_long, table_name, start, end, export_dfs)\n\n longs = list(long_start_risk_dict.keys())\n # shorts\n (short_df, short_start_risk_dict, short_end_risk_dict, short_exports) = greek_weighted_ts(greek, tickers_short, table_name, start,\n end, export_dfs)\n shorts = list(short_start_risk_dict.keys())\n\n ls_df = pd.DataFrame(index=ts_dates)\n\n start_risk_dict = net_risk(long_start_risk_dict, short_start_risk_dict)\n end_risk_dict = net_risk(long_end_risk_dict, short_end_risk_dict)\n\n for col in keep_cols:\n ls_df[col] = long_df[col] - short_df[col]\n\n for col in cost_cols:\n ls_df[col] = long_df[col] + short_df[col]\n\n ls_df = ls_df.groupby(ls_df.columns, axis=1).sum()\n long_exports.update(short_exports)\n\n return ls_df, start_risk_dict, end_risk_dict, longs, shorts, 
long_exports\n\n\ndef greek_weighted_ts(greek, stocks, table_name, start, end, export_dfs=False):\n dict_ticker_df = {}\n exports = {}\n ts_dates = [d for d in list_of_dates if (start <= d <= end)]\n\n for t in stocks:\n df = load_ticker_df(table_name, t, start, end)\n if len(df) > 0:\n dict_ticker_df[t] = df\n export_df = bring_columns_first(df, [\"trade_date\",\"ticker\",\"expi\",\"spot\"])\n if export_dfs:\n sheet_key = t + \"_\" + table_name +\"_\" + start\n meta_dict = {\"ticker\": t,\n \"table name\": table_name,\n \"start date\": start,\n \"end date\": end}\n exports[sheet_key] = {\"df\": export_df, \"meta\": meta_dict}\n\n\n invert = 1\n if greek == \"theta\":\n invert = -1\n num_stocks = 0\n\n all_cols = greek_ts_cols + cost_cols\n\n start_risk_dict = {}\n end_risk_dict = {}\n kept_tickers = []\n data = {}\n units_series = {}\n\n\n for ticker, df in dict_ticker_df.items():\n df.set_index(\"trade_date\", inplace=True)\n # check timing of units calc, using greek D-1\n no_zeros = (df[greek] != 0).all()\n if no_zeros:\n # calc units\n df[\"units\"] = invert / df[greek]\n units_series[ticker] = df[\"units\"]\n units_series[ticker] = add_missing_dates_serie(units_series[ticker], ts_dates)\n\n # weighted columns\n for col in all_cols:\n df[col + \"_weighted\"] = df[\"units\"] * df[col]\n\n # greek weighted pnl\n df[\"cdh_pnl_weighted\"] = df[\"units\"].shift(periods=1) * df[\"cdh_pnl\"]\n\n df = add_missing_dates_df(df, ts_dates)\n dict_ticker_df[ticker] = df\n\n num_stocks += 1\n kept_tickers.append(ticker)\n start_risk_dict[ticker] = df.iloc[0][[\"vega\", \"min_ba\", \"units\"]]\n end_risk_dict[ticker] = df.iloc[-1][[\"vega\", \"min_ba\", \"units\"]]\n\n if num_stocks > 0:\n for col in (all_cols + [\"cdh_pnl\"]):\n data[col] = sum([dict_ticker_df[t][col + \"_weighted\"] for t in kept_tickers]) / num_stocks\n\n for ticker in kept_tickers:\n start_risk_dict[ticker][\"units\"] /= num_stocks\n end_risk_dict[ticker][\"units\"] /= num_stocks\n data[\"units_\" + ticker] = units_series[ticker]\n\n return pd.DataFrame(index=ts_dates, data=data), start_risk_dict, end_risk_dict, exports\n else:\n return empty_df(ts_dates, all_cols + [\"cdh_pnl\"]), start_risk_dict, end_risk_dict, exports\n\n\ndef add_missing_dates_df(df, ts_dates):\n add_dates = set(ts_dates) - set(df.index)\n added_dates_df = pd.DataFrame(index=add_dates, columns=df.columns)\n output = pd.concat((df, added_dates_df), axis=0).fillna(0)\n output = output.sort_index(ascending=True)\n return output\n\n\ndef add_missing_dates_serie(df, ts_dates):\n add_dates = set(ts_dates) - set(df.index)\n added_dates_df = pd.Series(index=add_dates)\n output = pd.concat((df, added_dates_df), axis=0).fillna(0)\n output = output.sort_index(ascending=True)\n return output\n\n\ndef empty_df(ts_dates, cols):\n added_dates_df = pd.DataFrame(index=ts_dates, columns=cols)\n output = added_dates_df.fillna(0)\n output = output.sort_index(ascending=True)\n return output\n\n\ndef net_risk(long_risk_dict, short_risk_dict):\n tickers = set(long_risk_dict.keys()) or set(short_risk_dict.keys())\n output = {}\n for t in tickers:\n if t in long_risk_dict.keys():\n if t in short_risk_dict.keys():\n vega = long_risk_dict[t][\"vega\"]\n min_ba = long_risk_dict[t][\"min_ba\"]\n units = long_risk_dict[t][\"units\"] - short_risk_dict[t][\"units\"]\n output[t] = pd.Series(index=[\"vega\", \"min_ba\", \"units\"], data=[vega, min_ba, units])\n else:\n vega = long_risk_dict[t][\"vega\"]\n min_ba = long_risk_dict[t][\"min_ba\"]\n units = long_risk_dict[t][\"units\"]\n 
output[t] = pd.Series(index=[\"vega\", \"min_ba\", \"units\"], data=[vega, min_ba, units])\n else:\n vega = short_risk_dict[t][\"vega\"]\n min_ba = short_risk_dict[t][\"min_ba\"]\n units = -short_risk_dict[t][\"units\"]\n output[t] = pd.Series(index=[\"vega\", \"min_ba\", \"units\"], data=[vega, min_ba, units])\n\n return output\n","repo_name":"BullManZ/kaziz17-osmv-Asie","sub_path":"src/library/backtest/greekneutral.py","file_name":"greekneutral.py","file_ext":"py","file_size_in_byte":8131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
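The add_missing_dates_* helpers in the record above align a series or frame to the full list of trade dates and zero-fill the gaps by concatenating an empty frame. The same alignment expressed with reindex, shown only as a compact equivalent of that step on made-up dates:

import pandas as pd

ts_dates = ["20240102", "20240103", "20240104", "20240105"]
df = pd.DataFrame({"cdh_pnl": [1.5, -0.5]}, index=["20240102", "20240104"])
aligned = df.reindex(ts_dates).fillna(0).sort_index(ascending=True)
print(aligned)  # the two missing trade dates appear with cdh_pnl == 0.0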
+{"seq_id":"37197114252","text":"import torch\nimport torch.nn as nn\nfrom torchvision import datasets, transforms\nfrom custom_models import RNN, LSTM, GRU\nfrom torch.utils.data import DataLoader\nimport tqdm\nimport time\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# region Hyper-parameters\n# input_size = 784 # 28x28\nnum_classes = 10\nnum_epochs = 3\nbatch_size = 500\nlearning_rate = 0.003\n\ninput_size = 28\nsequence_length = 28\nhidden_size = 128\nnum_layers = 2\n# endregion\n\n# region MNIST dataset\ntrain_dataset = datasets.MNIST(root='./data',\n train=True,\n transform=transforms.ToTensor(),\n download=True)\n\ntest_dataset = datasets.MNIST(root='./data',\n train=False,\n transform=transforms.ToTensor())\n# endregion\n\n# region Data loader\ntrain_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True)\n\ntest_loader = DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False)\n# endregion\n\n# region Initialize model\nmodel = None\n\nprint('\\nPlease select an algorithm: ')\nuser_input = input('Press 1 for Recurrent Neural Network (RNN)\\nPress 2 for Long Short-Term Memory (LSTM)\\nPress 3 '\n 'for Gated Recurrent Unit (GRU) \\n---> ')\n\nif user_input == '1':\n print('\\nLoading RNN...')\n model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)\nelif user_input == '2':\n print('\\nLoading LSTM...')\n model = LSTM(input_size, hidden_size, num_layers, num_classes).to(device)\nelif user_input == '3':\n print('\\nLoading GRU...')\n model = GRU(input_size, hidden_size, num_layers, num_classes).to(device)\nelse:\n print(f'You pressed {user_input} which is an invalid input. We will use the default model (RNN) to solve the '\n 'problem.')\n model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)\n\n# endregion\n\n# region Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n# endregion\n\n# region Train the model\nn_total_steps = len(train_loader)\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n # origin shape: [N, 1, 28, 28]\n # resized: [N, 28, 28]\n images = images.reshape(-1, sequence_length, input_size).to(device)\n labels = labels.to(device)\n\n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if (i + 1) % 100 == 0:\n print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{n_total_steps}], Loss: {loss.item():.4f}')\n\n# endregion\n\n# Progress bar\nprint('\\nTest in progress...')\nfor i in tqdm(range(15)):\n time.sleep(1.5)\n\n# region Test the model\n# In test phase, we don't need to compute gradients (for memory efficiency)\nwith torch.no_grad():\n n_correct = 0\n n_samples = 0\n for images, labels in test_loader:\n images = images.reshape(-1, sequence_length, input_size).to(device)\n labels = labels.to(device)\n outputs = model(images)\n # max returns (value ,index)\n _, predicted = torch.max(outputs.data, 1)\n n_samples += labels.size(0)\n n_correct += (predicted == labels).sum().item()\n\n acc = 100.0 * n_correct / n_samples\n print(f'Accuracy of the network on the 10000 test images: {acc} %')\n# 
endregion\n","repo_name":"FrancisNweke/RNN-LSTM-GRU-Algorithms","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
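The reshape in the training loop above frames every 28x28 MNIST image as a sequence of 28 rows with 28 features each. A small shape sanity check, using torch.nn.RNN as a stand-in for the custom RNN/LSTM/GRU classes, which live in custom_models and are not shown here:

import torch
import torch.nn as nn

batch = torch.randn(5, 1, 28, 28)    # [N, 1, 28, 28], as the DataLoader yields
seq = batch.reshape(-1, 28, 28)      # [N, sequence_length, input_size]
rnn = nn.RNN(input_size=28, hidden_size=128, num_layers=2, batch_first=True)
out, h_n = rnn(seq)
print(out.shape)  # torch.Size([5, 28, 128]); the last time step feeds the classifier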
+{"seq_id":"14986983930","text":"class Solution:\n def convertTime(self, current: str, correct: str) -> int:\n a,c=map(int,current.split(\":\"))\n b,d=map(int,correct.split(\":\"))\n ma=a*60+c\n mb=b*60+d\n d=0\n if mb>=ma:\n d=mb-ma\n else:\n d=3600-(ma-mb)\n c=0\n c+=d//60\n d=d%60\n c+=d//15\n d=d%15\n c+=d//5\n d=d%5\n c+=d\n \n return c","repo_name":"Anushree1291/Leetcode-solutions","sub_path":"2224-minimum-number-of-operations-to-convert-time/2224-minimum-number-of-operations-to-convert-time.py","file_name":"2224-minimum-number-of-operations-to-convert-time.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"44973358075","text":"from NLP import *\nfrom data_crawling import *\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport statsmodels as sm\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score, mean_squared_error\n\n\ndef combined_csv(product_filename, sentiment_filename):\n print(f\"combining csv files from {product_filename} and {sentiment_filename}...\")\n product_df = pd.read_csv(product_filename)\n sorted_product_df = product_df.sort_values(by=['Product_Sold_Count($)'], ascending=False)\n sentiment_df = pd.read_csv(sentiment_filename)\n\n # Map the 'highest' column in sentiment_df to the desired numeric values\n\n # Reset index for both DataFrames to concatenate them properly\n sorted_product_df.reset_index(drop=True, inplace=True)\n sentiment_df.reset_index(drop=False, inplace=True)\n\n # Concatenate the DataFrames\n combined_df = pd.concat([sorted_product_df, sentiment_df], axis=1)\n\n # Remove rows with any empty cells\n cleaned_df = combined_df.dropna()\n del cleaned_df['index']\n\n # Save the cleaned DataFrame to a CSV file\n cleaned_df.to_csv('combined.csv', index=False)\n\n print(\"Combined csv saved to 'combined.csv'.\")\n\n\n\n\ndef statsmodels_linear_regression(data_file):\n # Load the CSV data\n df = pd.read_csv(data_file)\n\n # Add a constant to the DataFrame for the intercept term (β0)\n df = sm.add_constant(df)\n\n # Define the dependent variable (y) and the independent variables (X)\n y = df['Product_Sold_Count($)']\n X = df[['positive', 'negative', 'neutral']]\n\n # Create a linear regression model using statsmodels\n X = sm.add_constant(X)\n\n # Fit the multiple linear regression model\n model = sm.OLS(y, X).fit()\n\n # Get the results\n results_summary = model.summary()\n\n # print the model summary\n print(model.summary())\n\n # Analyze the estimated coefficients, p-values, and model fit\n coefficients = model.params\n p_values = model.pvalues\n rsquared = model.rsquared\n adj_rsquared = model.rsquared_adj\n\n # Save the results in a JSON file\n results = {\n 'coefficients': coefficients.to_dict(),\n 'p_values': p_values.to_dict(),\n 'r_squared': rsquared,\n 'adjusted_r_squared': adj_rsquared\n }\n\n with open('regression_results.json', 'w') as f:\n json.dump(results, f)\n\n print(\"Regression analysis results saved in 'regression_results.json'\")\n\n # Scatterplot matrix, show linear regression line for each plot\n sns.pairplot(df[['Product_Sold_Count($)', 'positive', 'negative', 'neutral']])\n plt.save('scatterplot_matrix.png')\n plt.show()\n\n # Heatmap of the correlation matrix\n corr_matrix = df[['Product_Sold_Count($)', 'positive', 'negative', 'neutral']].corr()\n sns.heatmap(corr_matrix, annot=True, cmap=\"coolwarm\")\n plt.save('heatmap.png')\n plt.show()\n\n\n\ndef linear_regression_ML(data_file):\n data = pd.read_csv(data_file)\n\n # Prepare the dependent and independent variables\n X = data[['positive', 'negative', 'neutral']]\n y = data['Product_Sold_Count($)']\n\n # Perform multiple linear regression analysis\n print(\"Performing linear regression analysis using scikit-learn...\")\n model = LinearRegression().fit(X, y)\n print(\"Model fit complete. 
Calculating model performance metrics...\")\n # Create the folder if it does not exist\n folder_name = \"ML_data_analysis_result\"\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n\n # Get the results\n coefficients = model.coef_\n intercept = model.intercept_\n r_squared = model.score(X, y)\n\n # Calculate adjusted R-squared\n n = len(y) # number of samples\n p = X.shape[1] # number of independent variables\n adjusted_r_squared = 1 - (1 - r_squared) * (n - 1) / (n - p - 1)\n\n # Save the results in a JSON file\n results = {\n 'coefficients': list(coefficients),\n 'intercept': intercept,\n 'r_squared': r_squared,\n 'adjusted_r_squared': adjusted_r_squared\n }\n\n json_file_path = os.path.join(folder_name, 'regression_results_ML.json')\n with open(json_file_path, 'w') as f:\n json.dump(results, f)\n\n print(\"Regression analysis results saved in 'regression_results_ML.json'\")\n\n print(\"Stating Data Visualization...\")\n\n\n # Data visualization\n sns.set(style=\"whitegrid\")\n\n # Pairplot to visualize the relationships between variables\n plot_data=data[['Product_Sold_Count($)', 'highest_count(pos=1,neu=0,neg=-1)']]\n sns.pairplot(plot_data)\n pairplot_filename = os.path.join(folder_name, \"pairplot_ML.png\")\n plt.savefig(pairplot_filename)\n\n # Correlation heatmap\n correlation_matrix = plot_data.corr()\n plt.figure(figsize=(10, 6))\n sns.heatmap(correlation_matrix, annot=True, cmap=\"coolwarm\")\n heatmap_filename = os.path.join(folder_name, \"heatmap_ML.png\")\n plt.savefig(heatmap_filename)\n\n # Bar plot for coefficients\n plt.figure(figsize=(8, 6))\n coef_df = pd.DataFrame(coefficients, index=X.columns, columns=['Coefficient'])\n coef_df.plot(kind='bar', legend=False)\n plt.ylabel(\"Coefficient\")\n plt.title(\"Linear Regression Coefficients\")\n barplot_filename = os.path.join(folder_name, \"barplot_ML.png\")\n plt.savefig(barplot_filename)\n\n plt.show()\n\n print(\"Data Visualization Complete.\")\n\n\n","repo_name":"learnbeing/Data_Analysis_waibao","sub_path":"Sentimental_Analysis/Data_Analysis.py","file_name":"Data_Analysis.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
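The scikit-learn branch above regresses Product_Sold_Count($) on the three sentiment columns and derives adjusted R-squared from the sample and feature counts. A self-contained sketch of that fit, with a tiny made-up design matrix standing in for the CSV data:

import numpy as np
from sklearn.linear_model import LinearRegression

X = np.array([[5, 1, 2], [3, 2, 4], [8, 0, 1], [2, 3, 5], [6, 1, 3]])  # positive, negative, neutral
y = np.array([120.0, 80.0, 150.0, 60.0, 110.0])                        # product sold count
model = LinearRegression().fit(X, y)
r_squared = model.score(X, y)
n, p = X.shape                                       # samples, independent variables
adjusted_r_squared = 1 - (1 - r_squared) * (n - 1) / (n - p - 1)
print(model.coef_, model.intercept_, r_squared, adjusted_r_squared)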
+{"seq_id":"16096705768","text":"import array\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pickle\r\nplt.rcParams['figure.figsize'] = (5, 5)\r\ndata=pd.read_csv(\"healthcare-dataset-stroke-data.csv\")\r\n\r\n\r\ncorr_matrix = data.corr()\r\nplt.figure(figsize=(20,15))\r\nsns.heatmap(corr_matrix, annot=True)\r\n\r\n\r\n\r\n\r\ndata.isnull().sum()\r\n\r\ndata['bmi'].fillna(data['bmi'].mean(),inplace=True)\r\n\r\ndata.drop('id',axis=1,inplace=True)\r\n\r\nfrom matplotlib.pyplot import figure\r\nfigure(num=None, figsize=(8, 6), dpi=800, facecolor='w', edgecolor='k')\r\ndata.plot(kind='box')\r\n\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\nenc=LabelEncoder()\r\n\r\ngender=enc.fit_transform(data['gender'])\r\n\r\nsmoking_status=enc.fit_transform(data['smoking_status'])\r\n\r\nwork_type=enc.fit_transform(data['work_type'])\r\nResidence_type=enc.fit_transform(data['Residence_type'])\r\never_married=enc.fit_transform(data['ever_married'])\r\ndata['work_type']=work_type\r\n\r\ndata['ever_married']=ever_married\r\ndata['Residence_type']=Residence_type\r\ndata['smoking_status']=smoking_status\r\ndata['gender']=gender\r\ndata\r\n\r\nX=data.drop('stroke',axis=1)\r\nY=data['stroke']\r\n\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, Y_train, Y_test=train_test_split(X,Y,test_size=0.5,random_state=10)\r\n\r\n\r\n\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nstd=StandardScaler()\r\nX_train_std=std.fit_transform(X_train)\r\nX_test_std=std.transform(X_test)\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nrf=RandomForestClassifier(n_estimators=5,criterion=\"entropy\")\r\nrf.fit(X_train_std,Y_train)\r\nY_pred=rf.predict(X_train_std)\r\nprint(Y_pred)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nac_rf=accuracy_score(Y_test,Y_pred)\r\n\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nconfusion_matrix(Y_test,Y_pred)\r\ntp, fp, fn, tn = confusion_matrix(Y_test, Y_pred).ravel()\r\n(tp, fp, fn, tn)\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score\r\n\r\n\r\n\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\nmodel=DecisionTreeClassifier()\r\nmodel.fit(X_train_std,Y_train)\r\n \r\n\r\n \r\n \r\nY_pred_lr=model.predict(X_test_std)\r\n#Y_pred_lr\r\nac_lr=accuracy_score(Y_test,Y_pred_lr)\r\n\r\n\r\n\r\n\r\npickle.dump(model, open('model.pkl','wb'))\r\nprint(data)\r\n# Loading model to compare the results\r\nmodel = pickle.load(open('model.pkl','rb'))\r\n","repo_name":"millenniumSonule/deployment","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36972816610","text":"from __future__ import print_function\nimport openmm as mm\nimport openmm.app as app\nimport openmm.unit as u\n#from reducedstatedatareporter import ReducedStateDataReporter\nimport numpy as np\nimport os, sys\nimport parmed as pmd\nimport json\nfrom sys import platform\nimport unittest\nimport csv\nimport sys\nimport yaml\n\nfilename = str(sys.argv[1])\n\nwith open(filename) as file:\n params = yaml.load(file, Loader=yaml.FullLoader)\n\nN=int(params[\"num_particles\"])\nT=float(params[\"temperature\"])\nk=float(params[\"k_contig\"]) # k elastico contigui\nk_h=float(params[\"k_helix\"]) # k elastico elica\nl_0=float(params[\"length_rest\"]) \nl_0_h=float(params[\"length_rest_helix\"])\nm=float(params[\"mass\"])\neps=float(params[\"epsilon\"])\nsigma=float(params[\"sigma\"])\nn_blocks=int(params[\"n_blocks\"])\nn_steps=int(params[\"n_steps\"])\ntimestep=float(params[\"timestep\"])\n\nprint(\"Numero di monomeri: \", N)\n### setting LJ parameters (they can be one for each couple)\nepsilon_r = np.full(N, eps, dtype=\"float64\")\nsigmas_r=np.full(N, sigma, dtype=\"float64\")\nsigmaAR_r = np.zeros((N, N), dtype=\"float64\")\nepsilonAR_r = np.zeros((N, N), dtype=\"float64\")\n\nfor i in range(N):\n for j in range(i,N):\n sigmaAR_r[i][j] = (sigmas_r[i]+sigmas_r[j])/2.0\n sigmaAR_r[j][i] = sigmaAR_r[i][j]\n\nfor i in range(N):\n for j in range(i,N):\n epsilonAR_r[i][j] = (epsilon_r[i]+epsilon_r[j])/2.0\n epsilonAR_r[j][i] = epsilonAR_r[i][j]\n\nepsilonLST_r = (epsilonAR_r).ravel().tolist()\nsigmaLST_r = (sigmaAR_r).ravel().tolist()\nmasses_r= np.full(N, m, dtype=\"float64\")\n\n########### Building system + particles\nsystem = mm.System()\npositions = np.empty((N, 3)) # matrix 3*N (3D)\n\ncutoff_r=10.*max(sigmaLST_r) #cutoff distance for LJ\n########### Building forces\nbox_edge_r=1000.\nsystem.setDefaultPeriodicBoxVectors(mm.Vec3(box_edge_r, 0, 0), mm.Vec3(0, box_edge_r, 0), \n mm.Vec3(0, 0, box_edge_r))\n\n#harmonic \n\nk_arr= np.full(N, k, dtype=\"float64\")\nk_arr_d= np.full(N-3, k_h , dtype=\"float64\") #per alpha elica\nel_force = mm.HarmonicBondForce()\nfor i in range(N-1):\n el_force.addBond( i, i+1, 0.38, k_arr[i]) #particle 1, particle 2, length at rest, k elastic (unit: kJ/mol/nm^2)\n\n#elastic force between i and i+3\n#for i in range(N-4):\n# el_force.addBond( i, i+3, 0.516, k_arr_d[i]) #particle 1, particle 2, length at rest, k elastic (unit: kJ/mol/nm^2)\n\n\n#lennard-jones\nlj_force = mm.CustomNonbondedForce('4*eps*((sig/r)^12-(sig/r)^6); eps=epsilon(type1, type2); sig=sigma(type1, type2)')\nlj_force.setNonbondedMethod(mm.NonbondedForce.CutoffPeriodic)\nlj_force.setCutoffDistance(min(box_edge_r*0.49*u.nanometers, cutoff_r*u.nanometers))\nlj_force.addTabulatedFunction('epsilon', mm.Discrete2DFunction(N, N, \n epsilonLST_r))\nlj_force.addTabulatedFunction('sigma', mm.Discrete2DFunction(N, N,\n sigmaLST_r))\nlj_force.addPerParticleParameter('type')\n\n\n# set the initial particle parameters\nfor i in range(N):\n system.addParticle(masses_r[i]*u.amu)\n positions[i] = [i, 0.1*i, -0.3*i] + 10*np.random.rand(3)\n lj_force.addParticle([i])\n \nsystem.addForce(lj_force)\nsystem.addForce(el_force)\ntol=0.3\nmaxIter=0.\n\ninteg = mm.LangevinIntegrator(T, 1.0, timestep)\n#integ = mm.VerletIntegrator(0.001)\n#integ = mm.VariableVerletIntegrator(0.1)\n\ncontext = mm.Context(system, integ, mm.Platform.getPlatformByName('CPU'))\ncontext.setPositions(positions)\nstate = context.getState(getEnergy=True, getForces=True, getPositions=True)\n#print('positions before 
minimization: ', np.array(state.getPositions()/u.nanometer))\nmm.LocalEnergyMinimizer.minimize(context, tol, maxIter)\ncontext.setVelocitiesToTemperature(0) \n\nstate = context.getState(getEnergy=True, getForces=True, getPositions=True)\nprint('potential energy after minimization: ', state.getPotentialEnergy())\nprint('kinetic energy after minimization: ', state.getKineticEnergy())\nprint('total energy after minimization: ', state.getKineticEnergy() + state.getPotentialEnergy())\n\n### write on xyz and csv file\n# name of the file tells number of particles, k contiguous and temperature\nname = 'num' + str(N) + 'k' + str(k) + 'temp' + str(T)\nwith open(name + '.xyz', 'w', newline='') as file, open(name + '.csv', 'w', newline='') as file_csv:\n for j in range(n_blocks): #number of blocks of integration\n file.write(str(N))\n file.write('\\n')\n file.write('\\n')\n integ.step(n_steps) #steps for each block\n state = context.getState(getEnergy=True, getForces=True, getPositions=True)\n alpha=state.getPositions(asNumpy=True)/u.angstrom\n for i in range(N):\n file.write(f\"C {alpha[i][0]} {alpha[i][1]} {alpha[i][2]}\\n\")\n file_csv.write(f\"{alpha[i][0]} {alpha[i][1]} {alpha[i][2]} \")\n file_csv.write(\"\\n\")\n\n\nstate = context.getState(getEnergy=True, getForces=True, getPositions=True)\n\nprint('============================================')\nprint('potential energy after integration: ', state.getPotentialEnergy())\nprint('kinetic energy after integration: ', state.getKineticEnergy())\nprint('total energy after integration: ', state.getKineticEnergy() + state.getPotentialEnergy())\n\nprint(\"simulation: \", n_blocks, \" frames, \", n_steps , \" timesteps each\")\n","repo_name":"giorginolab/polymer-dim-thesis","sub_path":"esempio.py","file_name":"esempio.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31328908846","text":"from ParticleBunchClass import ParticleBunch\r\nfrom MagneticExternalField import MagneticExternalFieldClass\r\nfrom ElectricExternalField import ElectricExternalFieldClass\r\nfrom SumEMFields import EMFieldClass\r\n\r\nimport numpy as np \r\nimport math\r\nimport scipy.constants as const\r\nimport pytest\r\nimport re\r\n\r\ntest_ParticleBunch = ParticleBunch(numberOfParticles=2, bunchPositionSpread=0.0\r\n, bunchEnergySpread=0.0, bunchMeanEnergy=1.5032775929044686e-10, restMassOfBunch=const.proton_mass\r\n, chargeOfBunch=1.0, name=\"test_Bunch\")\r\ntest_ParticleBunch.listOfParticles[0].position = np.array([1.0, 2.0, 3.0])\r\ntest_ParticleBunch.listOfParticles[1].position = np.array([4.0, 5.0, 6.0])\r\ntest_ElectricExternalField = ElectricExternalFieldClass(electricFieldStrength=np.array([4.0, 5.0, 6.0])\r\n, angularFrequency=10.0, phaseShift=0.1, listOfDimensions=[[-4, 4], [8, 16], [-16, -8]]\r\n, name=\"test_Electric Field\")\r\ntest_MagneticExternalField = MagneticExternalFieldClass(magneticFieldStrength=np.array([7.0, 8.0, 9.0])\r\n, angularFrequency=15.0, phaseShift=0.2, listOfDimensions=[[-3, 3], [6, 12], [-12, -6]]\r\n, name=\"test_Magnetic Field\")\r\ntest_EMField = EMFieldClass(bunchOfParticles=test_ParticleBunch, listOfElectricFields=[]\r\n, listOfMagneticFields=[], name=\"test_EM Field\")\r\n\r\ndef test_EMField__repr__():\r\n # checks the repr function works\r\n assert re.findall(\"EM Field Collection: test_EM Field\", test_EMField.__repr__()) == [\"EM Field Collection: test_EM Field\"]\r\n\r\ndef test_SumOfEMFields():\r\n # checks that the total electromagnetic field is added correctly\r\n assert test_EMField.SumOfEMFields(affectedParticle=test_ParticleBunch.listOfParticles[0]\r\n , timeElapsed=1.0) == [pytest.approx(np.array([-1.92183905e+08, -1.92183905e+08, -1.92183905e+08]), rel=0.01)\r\n , pytest.approx(np.array([0.0, 2.13835144e-06, -2.13835144e-06]), rel=0.01)]\r\n\r\ndef test_GiveAcceleration():\r\n # checks that the resulting acceleration from the electromagnetic field is correct\r\n test_EMField.GiveAcceleration(particleBunch=test_ParticleBunch, timeElapsed=1.0)\r\n assert test_ParticleBunch.listOfParticles[0].acceleration == pytest.approx(\r\n np.array([-1.1489979e+35, -1.1489979e+35, -1.1489979e+35]), rel=0.01)\r\n","repo_name":"Lancaster-Physics-Phys389-2020/phys389-2020-project-sun-moon-and-tables","sub_path":"test_SumEMFields.py","file_name":"test_SumEMFields.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7532458579","text":"import json\nimport gamewall\nimport gametank\nimport gameExplode\nfrom gamebullet import Bullet\nimport gamebullet\nimport Text\nimport gamegetevent\nfrom constant import *\nfrom gamevictory import victory\nfrom gamedefeat import defeat\nimport gamedrop\nimport game_show_imformation\nimport gamearchitecture\nfrom gamecheck import *\nimport v_or_d\n\nBG_COLOR = pygame.Color(0, 0, 0)\n\n'''\n公式:\n穿透判定:总穿透力(All_Penetration)-总装甲值(All_Armor)>=0,可穿透\n 总穿透力(All_Penetration)=(火炮火力(fire)+炮弹穿透(gun_Penetration))*浮动系数(0.8~1.2)+(精准判定*100)\n 精准判定取决于对装甲精准系数(to_armor_accuracy,初始值为5%),若触发则为1,不触发则为0\n伤害判定:\n 穿透时:\n 总伤害(All_Damage)=(火炮火力(fire)+炮弹伤害(gun_Damage))*浮动系数(0.8~1.2)*对结构精准系数(to_structure_accuracy,初始为1)\n 未穿透时:\n 总伤害=(火炮火力+炮弹伤害)*浮动系数*对结构精准系数*0.2\n \n'''\n\n\nclass MainGame:\n window: pygame.Surface = None\n my_tank: gametank.MyTank = None\n time_info = \"30:00\"\n # 存储敌方坦克的列表\n enemyTankList = []\n # 存储敌方炮塔的列表\n enemyBatteryList = []\n #存储敌方指挥部的列表\n enemyCommandList = []\n #存储我方指挥部的列表\n myCommandList = []\n #存储我方炮台\n myBatteryList = []\n # 存储我方子弹的列表\n myBulletList = []\n # 我方当前选中的炮弹\n bullet_now = 0\n # 炮弹选择框\n bullet_choice_rect = [pygame.rect.Rect(1160, 250, 100, 45), pygame.rect.Rect(1160, 325, 100, 45),\n pygame.rect.Rect(1160, 400, 100, 45)]\n # 定义我方炮弹数量\n AP_num = 0\n HE_num = 0\n APCR_num = 0\n # 存储敌方子弹的列表\n enemyBulletList = []\n # 存储掉落物的列表\n dropList = []\n # 存储爆炸效果的列表\n explodeList = []\n explodebigList = []\n explodesmallList = []\n # 存储墙壁的列表\n wallList = []\n waterList = []\n grassList = []\n map_info = None\n # 精灵组\n sprite_group = pygame.sprite.Group()\n # 游戏时钟\n clock = pygame.time.Clock()\n # 掉落物概率\n drops_probability = {}\n # 记录游戏开始的时刻\n time_start = 0\n # 时间上限\n time = 0\n\n # 开始游戏\n @staticmethod\n def startGame(n):\n map_index = n\n # 获取地图路经\n map_path = 'maps/map' + str(map_index) + '.json'\n # 设置炮弹选择为穿甲弹\n MainGame.bullet_now = 0\n # 加载主窗口\n # 初始化窗口\n pygame.display.init()\n # 设置窗口的大小及显示\n MainGame.window = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])\n # 读入地图信息\n with open(map_path, 'r', encoding='utf-8') as f:\n MainGame.map_info = json.load(f)\n # 初始化我方坦克\n MainGame.my_tank = createMytank(MainGame.map_info['Player'])\n # 初始化敌人,并将敌人添加到列表中\n create_enemy(MainGame, MainGame.map_info['Enemies'])\n # 初始化墙壁\n gamewall.createWall(MainGame, MainGame.map_info['MapBlocks'])\n # 读取我方炮弹数量\n MainGame.AP_num = MainGame.map_info['Bullets']['AP']\n MainGame.HE_num = MainGame.map_info['Bullets']['HE']\n MainGame.APCR_num = MainGame.map_info['Bullets']['APCR']\n # 读取掉落物概率\n MainGame.drops_probability = MainGame.map_info['DropsProbability']\n # 读取最大时间\n MainGame.time = MainGame.map_info['Time']\n # 设置窗口的标题\n pygame.display.set_caption('Soul Tank')\n # 记录开始的时刻\n time_start = pygame.time.get_ticks()\n while True:\n # 使用坦克移动的速度慢一点\n MainGame.clock.tick(60)\n # 给窗口设置填充色\n MainGame.window.fill(BG_COLOR)\n # 获取事件\n if gamegetevent.getEvent(MainGame):\n return\n # 绘制信息板\n game_show_imformation.show(MainGame)\n # 绘制剩余时间\n time_now = pygame.time.get_ticks()\n rest_time = MainGame.time + time_start - time_now\n minute = rest_time // 60000\n second = rest_time % 60000 // 1000\n MainGame.time_info = '%02d:%02d' % (minute, second)\n MainGame.window.blit(Text.getTextSufaceGreen(MainGame.time_info), (1180, 35))\n # 绘制文字\n MainGame.window.blit(Text.getTextSufaceRed('%d' % len(MainGame.enemyTankList)), (1220, 110))\n # 绘制炮弹数量\n MainGame.window.blit(Text.getTextSufaceRed('%d' % MainGame.AP_num), (1220, 260))\n MainGame.window.blit(Text.getTextSufaceRed('%d' % 
MainGame.HE_num), (1220, 335))\n MainGame.window.blit(Text.getTextSufaceRed('%d' % MainGame.APCR_num), (1220, 410))\n # 绘制炮弹选择框\n pygame.draw.rect(MainGame.window, blue, MainGame.bullet_choice_rect[MainGame.bullet_now], 4)\n # 调用坦克显示的方法\n # 判断我方坦克是否是否存活\n if MainGame.my_tank and MainGame.my_tank.live:\n # 展示我方坦克\n MainGame.my_tank.displayTank(MainGame)\n if MainGame.my_tank.status.health > 3:\n MainGame.window.blit(Text.getTextSufaceGreen('%d' % MainGame.my_tank.status.health), (1220, 185))\n elif MainGame.my_tank.status.health > 1:\n MainGame.window.blit(Text.getTextSufaceYellow('%d' % MainGame.my_tank.status.health), (1220, 185))\n else:\n MainGame.window.blit(Text.getTextSufaceRed('%d' % MainGame.my_tank.status.health), (1220, 185))\n else:\n # 删除我方坦克\n del MainGame.my_tank\n MainGame.window.blit(Text.getTextSufaceRed('0'), (1220, 185))\n MainGame.my_tank = None\n # 循环遍历敌方坦克列表,检查敌方坦克\n check_enemy_tank(MainGame)\n # 循环遍历敌方炮塔列表,检查敌方炮塔\n check_enemy_battery(MainGame)\n # 循环遍历我方炮塔列表,检查我方炮塔\n check_my_battery(MainGame)\n # 循环遍历敌方指挥部列表,检查敌方指挥部\n check_enemy_command(MainGame)\n # 循环遍历我方指挥部列表,检查我方指挥部\n check_my_command(MainGame)\n # 循环遍历检查我方坦克的子弹\n checkMyBullet(MainGame)\n # 循环遍历检查掉落物\n gamedrop.check_drop(MainGame)\n # 循环遍历敌方子弹列表,检查敌方子弹\n checkEnemyBullet(MainGame)\n # 循环遍历墙壁列表,展示墙壁\n gamewall.blitWall(MainGame)\n # 循环遍历敌方坦克列表,展示敌方坦克\n blit_enemy_tank(MainGame)\n # 循环遍历敌方坦克列表,展示敌方炮塔\n blit_enemy_architecture(MainGame)\n # 循环遍历显示我方坦克的子弹\n blitMyBullet(MainGame)\n # 循环遍历敌方子弹列表,展示敌方子弹\n blitEnemyBullet(MainGame)\n # 循环遍历掉落物列表,展示敌方掉落物\n gamedrop.blit_drop(MainGame)\n # 循环遍历草列表,展示草\n gamewall.blitGrass(MainGame)\n # 循环遍历爆炸列表,展示爆炸效果\n gameExplode.blitExplode(MainGame)\n gameExplode.blitbigExplode(MainGame)\n gameExplode.blitsmallExplode(MainGame)\n # 显示特效\n MainGame.sprite_group.update(time_now)\n MainGame.sprite_group.draw(MainGame.window)\n # 判断是否有敌人剩余rest_time\n if v_or_d.V_or_D(n, MainGame, rest_time):\n return\n pygame.display.update()\n\n\n# 初始化敌人,并将敌人添加到列表中\ndef create_enemy(MainGame, enemy_info: dict):\n for enemy in enemy_info:\n if enemy['EnemyType'] == \"Light\":\n enemy = gametank.EnemyTank('LightTank', enemy['x'] * 60, enemy['y'] * 60)\n MainGame.enemyTankList.append(enemy)\n elif enemy['EnemyType'] == \"Middle\":\n enemy = gametank.EnemyTank('MediumTank', enemy['x'] * 60, enemy['y'] * 60)\n MainGame.enemyTankList.append(enemy)\n elif enemy['EnemyType'] == \"Heavy\":\n enemy = gametank.EnemyTank('HeavyTank', enemy['x'] * 60, enemy['y'] * 60)\n MainGame.enemyTankList.append(enemy)\n elif enemy['EnemyType'] == \"Heavy2\":\n enemy = gametank.EnemyTank('HeavyTank2', enemy['x'] * 60, enemy['y'] * 60)\n MainGame.enemyTankList.append(enemy)\n elif enemy['EnemyType'] == \"Battery\":\n enemy = gamearchitecture.Battery('Battery', enemy['x'] * 60, enemy['y'] * 60)\n MainGame.enemyBatteryList.append(enemy)\n elif enemy['EnemyType'] == \"myBattery\":\n enemy = gamearchitecture.myBattery('Battery', enemy['x'] * 60, enemy['y'] * 60)\n MainGame.myBatteryList.append(enemy)\n elif enemy['EnemyType'] == \"Command\":\n enemy = gamearchitecture.Command('Command', enemy['x'] * 60, enemy['y'] * 60)\n MainGame.enemyCommandList.append(enemy)\n elif enemy['EnemyType'] == \"myCommand\":\n my = gamearchitecture.Command('Command', enemy['x'] * 60, enemy['y'] * 60)\n 
MainGame.myCommandList.append(my)\n\n","repo_name":"zzzgod/SoulTank","sub_path":"maingame.py","file_name":"maingame.py","file_ext":"py","file_size_in_byte":9749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6222832082","text":"from selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support.expected_conditions import alert_is_present\nfrom selenium.webdriver.support.select import Select\n\nbrowser = webdriver.Chrome()\nbrowser.get(\"https://techstepacademy.com/training-ground\")\nprint(\"I have arrived\")\n\nbtn_one = browser.find_element_by_id(\"b1\")\nbtn_one.click()\nWebDriverWait(browser, 10).until(alert_is_present())\nprint(\"An alert appeared\")\nalert_btn1 = browser.switch_to.alert\nalert_btn1.accept()\n\nsel = browser.find_element_by_id('sel1')\nmy_select = Select(sel)\n\nmy_select.select_by_visible_text(\"Battlestar Galactica\")\nmy_select.select_by_index(0)\nmy_select.select_by_value(\"second\")\nprint(my_select.first_selected_option.text)\n\nbrowser.quit()\n","repo_name":"Freewin/seleniumExperiments","sub_path":"scriptStyle/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70245758250","text":"import collections\n\n\ndef main():\n # first_input = '/Users/christianhardmeier/Documents/project/2020-WMT-Chat/devset-checked.txt'\n # fnames = [\n # ('ED-PC', {\n # 'customer': 'translations-uedin/ft-paracrawl+train.chat.dev.de-en.en',\n # 'agent': 'translations-uedin/ft-paracrawl+train.chat.dev.en-de.de'\n # }),\n # ('FB-TR', {\n # 'customer': 'translations-fair/ft-train.chat.dev.de-en.en',\n # 'agent': 'translations-fair/ft-train.chat.dev.en-de.de'\n # })\n # ]\n first_input = '/Users/christianhardmeier/Documents/project/2020-WMT-Chat/devset-checked-subset-4trans.txt'\n fnames = [\n ('ED-TR', {\n 'customer': 'translations-uedin/ft-train.chat.dev.de-en.en',\n 'agent': 'translations-uedin/ft-train.chat.dev.en-de.de'\n })\n ]\n\n systems = collections.OrderedDict()\n for system, fn in fnames:\n translations = {}\n systems[system] = translations\n for speaker, fname in fn.items():\n with open(fname, 'r') as f:\n translations[speaker] = [line.rstrip('\\n') for line in f]\n\n with open(first_input, 'r') as f:\n for line in f:\n line = line.rstrip('\\n')\n print(line)\n if line.startswith('customer:') or line.startswith('agent:'):\n speaker = line[:line.index(':')]\n for system, translations in systems.items():\n print(' [%s] ' % system + translations[speaker].pop(0))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chardmeier/WMT2020-Chat","sub_path":"add_more_translations.py","file_name":"add_more_translations.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"24084192774","text":"import csv\nimport pprint\n\n'''def costo_camion(nombre_archivo):\n costo_total = 0\n archivo = open(nombre_archivo)\n encabezado = next(archivo)\n for n_linea, linea in enumerate(archivo, start=1):\n linea = linea.split(',')\n try:\n precio_fruta = int(linea[1]) * float(linea[2])\n costo_total += precio_fruta\n except ValueError:\n print(f'Fila {n_linea}: No pude interpretar: {linea}')\n print(\"El costo total es:\", costo_total)\n archivo.close()\n return costo_total\n\ncosto = costo_camion('../../Data/missing.csv')'''\n\n'''f = open('../../Data/camion.csv')\nfilas = csv.reader(f)\nencabezados = next(filas)\nfila = next(filas)\nlista = []\n\nfor fila in filas:\n lista.append(list(zip(encabezados, fila)))\n\npprint.pprint(lista)'''\n\ndef costo_camion(nombre_archivo):\n costo_total = 0\n f = open(nombre_archivo)\n filas = csv.reader(f)\n encabezados = next(filas)\n for n_fila, fila in enumerate(filas, start=1):\n record = dict(zip(encabezados, fila))\n try:\n ncajones = int(record['cajones'])\n precio = float(record['precio'])\n costo_total += ncajones * precio\n except ValueError:\n print(f'Fila {n_fila}: No pude interpretar: {fila}')\n print(f'El número de cajones es de {ncajones} y el precio por cajon es de {precio}')\n return costo_total\n\n#costo = costo_camion('../../../Data/missing.csv')\n#print(costo)\n\n\n#fecha = costo_camion('../../../Data/fecha_camion.csv')\n#print(fecha)\n","repo_name":"LenaSofia/Programacion_en_Python_UNSAM","sub_path":"Notas/03_Datos/ejercicios/secuencias/costo_camion.py","file_name":"costo_camion.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"es","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"7143818141","text":"#coding:utf-8\n#AOJ用(TLEになるので高速化する)\n\nimport math\nimport sys\n\ndef f(L):\n if isinstance(L, list):\n if L == []:\n return []\n else:\n return f(L[0]) + f(L[1:])\n else:\n return [L]\n\ndef E(d):\n\n p_list = [] \n tmp = []\n num = int(math.sqrt(len(d)))+1\n for i in d:\n i=d[0]\n if i>num:\n break\n else:\n p_list.append(i)\n for j in d:\n if j%i == 0:\n d.remove(j)\n \n p_list.append(d)\n return p_list\n\nlists = []\nfor line in sys.stdin:\n x= list(range(2,(int(line)+1)))\n print(len(f(E(x))))\n","repo_name":"hatobus/Eratosthenes","sub_path":"AOJ0009.py","file_name":"AOJ0009.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21241494817","text":"from django.conf import settings\nfrom django.core.mail import send_mail\n\n\ndef auth_send_mail(user_email, activ_url):\n send_mail(\n subject='Подтверждение почты при регистрации',\n message=f'Вы успешно зарегистрировались.\\n'\n f'Для завершения регистрации пройдите по ссылке: \\n'\n f'http://127.0.0.1:8000{activ_url}',\n from_email=settings.EMAIL_HOST_USER,\n recipient_list=[user_email],\n fail_silently=False\n )","repo_name":"AAiev/Course_Project_06","sub_path":"users/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31530282179","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 27 00:08:36 2022\r\n\r\n@author: IanChen\r\n\"\"\"\r\nimport copy\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom .PdGeom import PdGeom\r\nfrom .PdProject import PdProject\r\nfrom .PdBatch import *\r\n\r\ndef SourceProfile(df, scan_dir, norm_dir, high_dir):\r\n geometry = PdGeom(df)\r\n geometry.estimate(20) \r\n \r\n df_copy = copy.deepcopy(df)\r\n PdProject(df_copy, [1, 1, 0], [1, 0, 0], [0, 1, 0], [\"X\", \"Y\", \"Z\"], [\"PX\", \"PY\", \"PZ\"]) \r\n df_copy_proj = df_copy[[\"PX\", \"PY\", \"PZ\"]]\r\n df_copy_proj.columns = [\"X\", \"Y\", \"Z\"]\r\n\r\n# geometry2 = PdGeom(df_copy_proj)\r\n# df_proj = geometry2.estimate(20) \r\n# source_proj_plan, source_proj_corr = geometry2.planarize(0.4)\r\n# draw_pd_result(source_proj_plan, source_proj_corr)\r\n\r\n# #%% \r\n# df_copy[\"Planarity\"] = df_proj[\"Planarity\"]\r\n# source_plan, source_corr = PdPlanarize(df_copy, 0.5)\r\n# source_plan = df.loc[source_plan.index]\r\n# draw_pd_result(source_plan, source_corr)\r\n#%%\r\n df_copy_proj_group = PdGroup(df_copy_proj, scan_dir, norm_dir, high_dir, 1, norm_dir)\r\n group_size_list = [len(ele) for ele in df_copy_proj_group]\r\n # sns.histplot(data=group_size_list)\r\n\r\n group_size_list_std = np.std(group_size_list)\r\n group_size_list_mean = np.mean(group_size_list)\r\n group_size_pd = pd.DataFrame(group_size_list)\r\n group_size_index = group_size_pd[group_size_pd[0] > (group_size_list_mean-2*group_size_list_std)].index.values\r\n df_copy_proj_group_new = [df_copy_proj_group[i] for i in group_size_index]\r\n \r\n # sns.kdeplot(group_size_list, shade=True)\r\n # plt.show()\r\n#%%\r\n \"\"\"Group\"\"\"\r\n group_max_min = []\r\n for ele in df_copy_proj_group_new:\r\n if len(ele) > 2:\r\n group_elements = df_copy_proj.loc[ele]\r\n # print(\"group_elements :\\n\", group_elements)\r\n min_index = group_elements[norm_dir].idxmin()\r\n # max_index = group_elements[norm_dir].idxmax()\r\n # group_max_min.append([min_index])\r\n group_elements_sort = group_elements.sort_values(by=[norm_dir], ascending=True)\r\n # print(\"group_elements sort :\\n\", group_elements_sort)\r\n # print(\"min :\", group_elements.loc[min_index][norm_dir])\r\n # print(\"max :\", group_elements.loc[max_index][norm_dir])\r\n # print(\"min_index :\\n\", min_index)\r\n # print(\"min_index :\\n\", group_elements_sort[:2].index.values.tolist())\r\n # print(\"type :\\n\", type(group_elements_sort[:2].index.values.tolist()))\r\n group_max_min.append(group_elements_sort[:2].index.values.tolist())\r\n\r\n group_max_min = np.concatenate(group_max_min)\r\n\r\n temp = copy.deepcopy(df_copy_proj)\r\n temp[\"Y\"] = temp[\"Y\"].round(decimals=2)\r\n df_xyz_groupby = temp.groupby([\"Y\"])\r\n df_xyz_group = df_xyz_groupby.groups\r\n\r\n temp = df_copy.loc[group_max_min]\r\n temp_copy = copy.deepcopy(df_copy.drop(group_max_min)) \r\n return temp_copy, temp","repo_name":"IanChen0718/burr-identification","sub_path":"utils/SourceProfile.py","file_name":"SourceProfile.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15321192283","text":"from typing import Dict, Tuple\n\nimport pytorch_lightning as pl\nimport torch\nfrom utils import Pix2Patch\nfrom configuration import CONSTANTS as C\nfrom configuration import Configuration, create_loss, create_optimizer\n\n\nclass BaseModel(pl.LightningModule):\n \"\"\"A base class for neural networks that defines an interface and implements automatic\n handling of patch-wise and pixel-wise loss functions and metrics. By default, a model \n is expected to ouput pixel-wise and patch-wise, depending on config.model_out. Please\n raise an error, if your model does not support one of the methods.\"\"\"\n\n def __init__(self, config:Configuration):\n super().__init__()\n\n self.config = config\n self.loss = create_loss(config)\n\n if config.model_out == 'patch' and config.loss_in == 'pixel':\n raise RuntimeError(f'Invalid configuration: model_out=patch, loss_in=pixel.')\n\n # prepare dimensions:\n if self.config.model_out == 'patch':\n self.out_size = int(C.IMG_SIZE / C.PATCH_SIZE)\n\n elif self.config.model_out == 'pixel':\n self.out_size = C.IMG_SIZE\n\n # automatic pixel to patch transform by averaging \n self.pix2patch = Pix2Patch(C.PATCH_SIZE)\n self.sigmoid = torch.nn.Sigmoid()\n\n # output modes of model: for pixelwise model, also the patchwise outputs are tracked\n self.outmodes = ['patch', 'pixel'] if (config.model_out=='pixel') else ['patch']\n\n self.save_hyperparameters()\n\n\n def configure_optimizers(self):\n t_opt, kwargs = create_optimizer(self.config)\n return t_opt(self.parameters(), **kwargs)\n\n def forward(self, batch:torch.Tensor) -> torch.Tensor:\n n_samples, n_channels, in_size, in_size = batch.shape\n raise NotImplementedError(\"Must be implemented by subclass.\")\n return batch.reshape(n_samples, 1, self.out_size, self.out_size)\n\n\n def step(self, batch:Dict[str, torch.Tensor], batch_idx):\n images = batch['image']\n targets = batch['mask'].unsqueeze(1) # get channel dimension\n\n # forward through model to obtain logits and probabilities\n logits = self(images)\n probas = self.sigmoid(logits)\n\n # nested dict for patchwise and pixelwise prediction as output\n out = dict([(mode, dict()) for mode in self.outmodes]) \n\n # model output might come pixel- or patchwise\n if self.config.model_out == 'pixel':\n out['pixel']['logits'] = logits # logits are pixelwise\n out['pixel']['probas'] = probas # probas are pixelwise\n out['pixel']['targets'] = targets # targets are pixelwise\n out['patch']['probas'] = self.pix2patch(probas)\n out['patch']['logits'] = torch.logit(out['patch']['probas']) # not very nice... 
but if loss takes logits, we need to :/\n out['patch']['targets'] = self.pix2patch(targets)\n else: \n out['patch']['logits'] = logits # logits are patchwise\n out['patch']['probas'] = probas # probas are patchwise\n out['patch']['targets'] = self.pix2patch(targets) # targets are patchwise\n \n\n # compute loss from pixel or patch with soft predictions/targets\n if self.config.loss_in == 'pixel':\n out['loss'] = self.loss(out['pixel']['logits'], out['pixel']['targets'])\n else:\n out['loss'] = self.loss(out['patch']['logits'], out['patch']['targets'])\n\n return out\n\n\n def training_step(self, batch:dict, batch_idx):\n out = self.step(batch, batch_idx)\n self.log('train/loss', out['loss'])\n return out\n \n\n def validation_step(self, batch:dict, batch_idx):\n out = self.step(batch, batch_idx)\n self.log('valid/loss', out['loss'])\n return out\n\n\n def apply_threshold(self, soft_labels, outmode:str) -> Tuple[torch.Tensor, torch.Tensor]:\n threshold = C.THRESHOLD if (outmode=='patch') else 0.5\n hard_labels = (soft_labels > threshold).float() # convert to hard labels\n return hard_labels\n\n\n def predict_step(self, batch:dict, batch_idx):\n images = batch['image']\n i_inds = batch['idx'] # image indices\n\n # get probabilities\n probas = self.sigmoid(self(images))\n if self.config.model_out == 'pixel': \n probas = self.pix2patch(probas)\n\n # get predictions\n preds = self.apply_threshold(probas, 'patch').int().squeeze() # remove channels\n\n # get submission table\n rows = []\n for k in range(preds.shape[0]):\n for i in range(preds.shape[1]):\n for j in range(preds.shape[2]):\n row = [i_inds[k], j*C.PATCH_SIZE, i*C.PATCH_SIZE, preds[k, i, j]]\n rows.append(torch.tensor(row).unsqueeze(0))\n\n return torch.cat(rows, dim=0)\n","repo_name":"jjurm/road-segmentation","sub_path":"models/BaseModel.py","file_name":"BaseModel.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18378638214","text":"#Day 09\r\n\r\nimport math\r\n\r\nwith open('./09.txt') as my_input:\r\n compressed_file = my_input.read()\r\n\r\n#Part 1\r\n\r\nmarker = ''\r\nn, repeat = 0, 0\r\nlength = 0\r\nfor char in compressed_file:\r\n if n:\r\n n -= 1\r\n else:\r\n marker += char\r\n if ')' in marker:\r\n n, repeat = map(int, marker[1:-1].split('x'))\r\n length += n * repeat\r\n marker = ''\r\n\r\nprint(length)\r\n\r\n#Part 2\r\n\r\nmarker = ''\r\nm = False\r\nn, repeat = [], []\r\nlength, counter = 0, 0\r\nfor char in compressed_file:\r\n if char in ('(', ')'):\r\n m = True if char == '(' else False\r\n n = [i-1 for i in n]\r\n if char == ')':\r\n n.append(int(marker.split('x')[0]))\r\n repeat.append(int(marker.split('x')[1]))\r\n marker = ''\r\n elif m:\r\n marker += char\r\n n = [i-1 for i in n]\r\n else:\r\n counter += 1\r\n if n[-1] == 1:\r\n length += counter * math.prod(repeat)\r\n counter = 0\r\n n = [i-1 for i in n]\r\n while n:\r\n if not n[-1]:\r\n n.pop()\r\n repeat.pop()\r\n else:\r\n break\r\n\r\nprint(length)","repo_name":"marin1401/aoc2016","sub_path":"09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15212268745","text":"import pandas as pd\nfrom datetime import datetime\nfrom squareup_api.square_utils import SquareClient, SquareCatalogUtils\n\n\nclass DfItemLib:\n \"\"\"\n item library template in Pandas dataframe\n \"\"\"\n df_item_lib = pd.DataFrame({\n \"item_header\": [\"item_header_test\", \"item_header_test_2\"] * 2,\n \"variation_id\": [\"XXX\"] * 4,\n \"variation_name\": [\"variation_name_test\", \"variation_name_test_2\",\n \"variation_name_test_3\", \"variation_name_test_4\"],\n \"item_id\": [\"XXX\"] * 4,\n \"item_name\": [\"item_name_test\", \"item_name_test_2\"] * 2,\n \"category_id\": [\"XXX\"] * 4,\n \"category_name\": [\"category_name_test\", \"category_name_test_2\"] * 2,\n \"variation_amount\": [\"100\", \"101\"] * 2,\n \"variation_currency\": [\"USD\"] * 4,\n \"item_description\": [f\"test item, created at {datetime.now()}\"] * 4,\n \"variation_id_inner\": [\"#XXXX#\"] * 4\n })\n\n\nclass ListPlan:\n plan_as_dict_test: dict\n\n def __init__(self):\n square_client = SquareClient.create(env=\"test\")\n catalog_util = SquareCatalogUtils(square_client)\n # catalog_util = SquareCatalogUtils()\n # catalog_util.create_item_library(DfItemLib.df_item_lib)\n item_variations = catalog_util.get_item_catalogs().get(\"item_variation\")\n # variation_ids = catalog_util.df_item_lib.variation_id.values\n # print(variation_ids)\n variation_ids = [obj.get(\"id\") for obj in item_variations \\\n if obj.get(\"item_variation_data\").get('pricing_type') == 'FIXED_PRICING']\n\n # transaction_plan (hard coded for testing)\n plan_as_dict_test = {'2020-10-10 7:00:00':\n [(302,[variation_ids[0], variation_ids[3], variation_ids[3]]),\n (201,[variation_ids[2], variation_ids[1]]),\n (402,[variation_ids[1], variation_ids[1],variation_ids[2], variation_ids[0]])]}\n self.plan_as_dict_test = plan_as_dict_test\n","repo_name":"kefeimo/sam-square-github-actions","sub_path":"squareup_api/tests/data_test.py","file_name":"data_test.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23530574571","text":"# define a function with 2 parameters\ndef add_numbers(number1, number2):\n print(int(number1) + int(number2))\n\nprint(\"adding 1 + 2 gives\")\n# below is to define the parameters\nadd_numbers(1, 2)\n# what if you don't give enough define, it will tell you missing positional argument\nadd_numbers(1)\n# what if you give more parameter then set (it will 2 positional argument but 3 is given\nadd_numbers(1, 2, 3)","repo_name":"kayanpang/champlain181030","sub_path":"week5-2_functions_181129/function_2-parameters.py","file_name":"function_2-parameters.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2848716703","text":"import cv2\nimport numpy as np\n\ndef preprocess(qPart2):\n\n qPart2Warp = cv2.resize(qPart2, (600, 89))\n qPart2Warp[:, 0:6] = 255\n qPart2Warp[:, -30:] = 255\n qPart2Warp[-10:,:] = 255\n qPart2Warp = qPart2Warp[25:-5,4:584]\n\n\n return qPart2Warp\n\n\n# Preprocessing for contour detection\ndef contour_prepocessing(qPart2Warp, erode_iteration):\n imgG = cv2.cvtColor(qPart2Warp, cv2.COLOR_BGR2GRAY)\n img1 = cv2.GaussianBlur(imgG, (5,5), 1)\n imgC = cv2.threshold(img1, 242, 255, cv2.THRESH_BINARY)[1]\n kernel = np.ones((2, 2))\n imgD = cv2.dilate(imgC, kernel, iterations=1)\n imgE = cv2.erode(imgD, kernel, iterations=erode_iteration)\n\n return imgG, imgE","repo_name":"vishnu0179/OMR-Reader","sub_path":"omr/qpart2.py","file_name":"qpart2.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23544976832","text":"# -*- coding: utf-8 -*-\n\"\"\"\nlast modified: apr 20, 2023\n\n@author: katie\n\ndescription:\n functions from class for rod sets made R-friendly\n \n the Rodset class contains defining information for a group of rods.\n \n initiated with a dictionary of counts or list of rods in the set.\n \n represented by list (basic) descriptor.\n rodsets with the same basic representation are equivalent.\n \n attributes:\n basic - list of rod lengths\n counts - dict listing rod lengths (e.g. [2,3] = {1:0, 2:1, 3:1})\n coefs - list of coefs for the rod polynomial\n roots - all the roots (absolute value, rounded to 10 places)\n growth - max root\n order - degree of polynomial\n fullpoly - the full polynomial, written out\n facpoly - the full polynomial factored\n \n methods:\n init, repr, eq\n copy - deep copy\n spotcon (helper) - switch between list and dictionary representations\n coefcon (helper) - switch between polynomial string and coef list\n \n dependencies:\n factor_list() from sympy\n round() and abs() from numpy\n polyroots() and polydiv() from numpy.polynomial.polynomial\n \n\"\"\"\n# dependencies\nfrom sympy import factor_list\nfrom numpy import round as np_round\nfrom numpy import abs as np_abs\nfrom numpy.polynomial.polynomial import polyroots\nfrom numpy.polynomial.polynomial import polydiv\n\nclass Rodset:\n \"\"\" class for set of cuisenaire rods\n must input one of below as represent:\n string (str): string containing number of rods at each length\n group (list): list of positive integer lengths of rods in set\n \n e.g. the padovans would be [2,3] or '011'. note [2,2] is '02'\n \"\"\"\n \n ########## helpers\n # helper: string to basic converter\n @staticmethod\n def spotcon(rep):\n ''' changes a dictionary representaion of a rodset to a list\n or vice versa.\n \n note:\n - assumes rep is already correctly one or the other\n - [1,-3,-3,4] <-> {1:1, 2:0, 3:-2, 4:1}\n '''\n \n # dict to list\n if isinstance(rep, dict):\n converted = []\n for key in rep:\n converted.extend([key if rep[key] > 0 else -key for \\\n x in range(abs(rep[key]))])\n \n # list to dict\n elif isinstance(rep, list):\n converted = {}\n for i in range(abs(max(rep, key = abs))):\n pos = rep.count(i + 1)\n neg = rep.count(-(i + 1))\n if pos >= neg:\n converted[i + 1] = pos\n else:\n converted[i + 1] = -neg\n \n return converted\n \n \n # helper: string to basic converter\n @staticmethod\n def coefcon(poly):\n ''' changes a string representaion of a polynomial to a list of coefs\n or vice versa.\n \n note:\n - assumes rep is already correctly one or the other\n - poly strings must be fully expanded\n '''\n \n # string to list\n if isinstance(poly, str):\n # find degrees and coefficients\n power = {}\n sign = 1\n poly = poly.replace('^', '**')\n for t in poly.split():\n if t == '-':\n sign = -1\n elif t == '+':\n sign = 1\n else:\n term = t.split('*')\n deg, coef = 1, 1\n if '' in term:\n deg = int(term[-1])\n elif 'x' not in term:\n deg = 0\n if term[0] != 'x':\n coef = int(term[0])\n power[deg] = coef * sign\n # convert to coefficient list\n converted = [0] * (max(power.keys()) + 1)\n for i in range(len(converted)):\n if i in power.keys():\n converted[i] = power[i]\n \n # list to string\n elif isinstance(poly, list):\n converted = ''\n top = len(poly) - 1\n if top == 0:\n converted = str(poly[0])\n else:\n # iterate through coefficients\n for deg, coef in enumerate(poly):\n term = ''\n # handle coefficient\n if coef == 1:\n term += ' + '\n elif coef == -1:\n term += ' - '\n elif coef > 
0:\n term += f\" + {coef}*\"\n elif coef < 0:\n term += f\" - {abs(coef)}*\"\n # handle degree\n if coef != 0:\n if deg == 0:\n if abs(coef) == 1:\n term += '1'\n else:\n term = term[:-1]\n elif deg == top:\n if coef == 1:\n if deg == 1:\n term = \"x\"\n else:\n term = f\"x^{deg}\"\n elif coef == -1:\n if deg == 1:\n term == \"-x\"\n else:\n term = f\"-x^{deg}\"\n else:\n term = f\"{coef}*x^{deg}\"\n elif deg == 1:\n term += 'x'\n elif deg < top:\n term += f\"x^{deg}\"\n \n # add to converted\n converted = term + converted\n \n return converted\n \n \n ########## class methods\n # initialize\n def __init__(self, represent):\n ''' intitalizes from a descriptive attribute (represent),\n either list of lengths (basic) or bit string (string).\n \n also initializes:\n coefs\n roots\n growth\n order\n fullpoly\n facpoly\n \n REMOVED:\n minimal\n shift\n \n note: ONLY works for rod polys.\n '''\n \n # find which argument was given and fill others\n # also raise errors for invalid values and types\n if isinstance(represent, dict):\n if all(isinstance(x, int) for x in represent.keys()):\n basic = self.spotcon(represent)\n counts = self.spotcon(basic)\n else:\n raise ValueError('string representation must be numeric')\n \n # list of rod lengths\n elif isinstance(represent, list):\n if all(isinstance(x, int) for x in represent):\n represent.sort(key = abs)\n basic = represent\n counts = self.spotcon(represent)\n else:\n raise ValueError('list representation must contain ints')\n \n # neither\n else:\n raise TypeError('representation must be a list or string')\n \n # initialize attributes\n # given attributes\n self.counts = counts\n self.basic = basic\n \n # polynomials\n # coefficients\n coefs = \\\n [-counts[x + 1] for x in range(max(counts.keys()))][::-1] + [1]\n self.coefs = coefs\n \n # growth rate\n roots = np_round(np_abs(polyroots(coefs)), 10)\n growth = max(roots)\n order = len(coefs) - 1\n self.roots = roots\n self.growth = growth\n self.order = order\n \n # full polynomial\n fullpoly = self.coefcon(coefs)\n self.fullpoly = fullpoly\n \n # factored\n facpoly = [(str(x[0]).replace('**', '^'), x[1]) for x in \\\n factor_list(fullpoly)[1]]\n self.facpoly = facpoly\n \n \n # represent\n def __repr__(self):\n ''' basic (list) representation.\n '''\n \n return str(self.basic)\n \n \n # equal\n def __eq__(self, other):\n ''' rodsets are the same if they have the same list of rods.\n if other is not Rodset, return False.\n '''\n \n if not isinstance(other, Rodset):\n return False\n \n if self.basic == other.basic:\n return True\n else:\n return False\n \n \n # (deep) copy\n def copy(self):\n ''' takes: self\n returns: a deep copy of self\n \n note that this carries over any manually-created quirks.\n '''\n \n new = Rodset('1')\n \n for key in self.__dict__:\n new.__dict__[key] = self.__dict__[key]\n \n return new\n","repo_name":"ktllee/plug_problem","sub_path":"cuisenaire/trees/rods_r.py","file_name":"rods_r.py","file_ext":"py","file_size_in_byte":9162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21188049941","text":"import os\nimport sys\nimport time\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SparkSession\nfrom graphframes import *\nfrom itertools import combinations\n\nos.environ[\"PYSPARK_SUBMIT_ARGS\"] = \"--packages graphframes:graphframes:0.8.2-spark3.1-s_2.12 pyspark-shell\"\n\ndef getEdges(rdd_graph_group, filter_thres):\n dict_graph = dict(rdd_graph_group.collect())\n list_nodes = sorted(dict_graph.keys())\n list_edges = list()\n set_vertex = set()\n for comb in combinations(list_nodes, 2):\n set_bus_a = dict_graph[comb[0]]\n set_bus_b = dict_graph[comb[1]]\n num_co_bus = len(set_bus_a & set_bus_b)\n if num_co_bus >= filter_thres:\n list_edges.append(comb)\n reverse_comb = (comb[1], comb[0])\n list_edges.append(reverse_comb)\n set_vertex.add(comb[0])\n set_vertex.add(comb[1])\n\n return list(set_vertex), list_edges\n\n\ndef writeOutput(list_comm, output_file_path):\n with open(output_file_path, 'w+') as o:\n o.write('\\n'.join([str(tup).strip('[').strip(']') for tup in list_comm]))\n o.close()\n\n\nif __name__ == '__main__':\n time_start = time.time()\n\n # input params\n filter_thres = float(sys.argv[1])\n input_file_path = sys.argv[2]\n output_file_path = sys.argv[3]\n\n # filter_thres = 7\n # input_file_path = 'input/ub_sample_data.csv'\n # output_file_path = 'output/output1.txt'\n\n # set spark\n spark_config = SparkConf().setMaster('local').setAppName('task1').set('spark.executor.memory', '4g').set(\n 'spark.driver.memory', '4g')\n sc = SparkContext(conf=spark_config)\n sc.setLogLevel('OFF')\n ss = SparkSession(sc)\n\n # read data into RDD\n rdd_raw = sc.textFile(input_file_path)\n rdd_header = rdd_raw.first()\n rdd_graph = rdd_raw.filter(lambda item: item != rdd_header)\n\n # group by userid\n rdd_graph_group = rdd_graph. \\\n map(lambda r: (r.split(',')[0], r.split(',')[1])). \\\n groupByKey(). \\\n map(lambda item: (item[0], set(item[1])))\n\n # get list of nodes and edges\n list_nodes, list_edges = getEdges(rdd_graph_group, filter_thres)\n\n # convert list of nodes and edges to Spark DF\n list_nodes_tup = [(node,) for node in list_nodes]\n df_nodes = sc.parallelize(list_nodes_tup).toDF(['id'])\n df_edges = sc.parallelize(list_edges).toDF(['src', 'dst'])\n\n # find communities using GraphFrames\n graph = GraphFrame(df_nodes, df_edges)\n result_comm = graph.labelPropagation(maxIter=5)\n\n # read communities to rdd -> group\n rdd_comm = result_comm.rdd.\\\n map(lambda r: (r[1], r[0])).\\\n groupByKey().\\\n map(lambda r: sorted(list(r[1]))).\\\n sortBy(lambda r: (len(r), r))\n\n # write results\n writeOutput(rdd_comm.collect(), output_file_path)\n\n print('Duration: ', time.time() - time_start)\n\n","repo_name":"bvorapoom/usc_apds","sub_path":"553_Data Mining/Homework4/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2818844253","text":"#!/usr/bin/python3\nimport subprocess\nimport tempfile\nimport sys\nimport os\n\nDEFAULT_TEXT=\"brother %s, looking correct!\" % os.environ['USER']\n\ndef get_text_bubble(inputStr=None):\n fp = tempfile.TemporaryFile()\n cmd = 'cowsay -f {}/blank.cow '.format(os.path.dirname(os.path.realpath(__file__)))\n stdinput=\"\"\n \n if inputStr != None:\n cmd += DEFAULT_TEXT\n else:\n cmd += \" \".join(sys.argv[1:])\n p = subprocess.call(cmd, stdin=sys.stdin, stdout=fp, shell=True)\n \n fp.seek(0)\n s = fp.readlines()\n fp.close()\n return [ line.decode('utf-8') for line in s ]\n\ndef get_melanyeet():\n with open(os.path.dirname(os.path.realpath(__file__)) + '/melanyeet.cow') as f:\n lines = f.readlines()\n for i in range(len(lines)):\n line = lines[i]\n return lines\n\ndef combine(textbubble, melanyeet):\n targetRow = 16\n textRows = len(textBubble)\n topHalf = textRows // 2\n botHalf = textRows - topHalf\n\n if (targetRow - topHalf >= 0 and targetRow + botHalf < len(melanyeet)):\n for i in range(targetRow - topHalf, targetRow + botHalf):\n j = i - (targetRow - topHalf)\n melanyeet[i] = melanyeet[i][:-1] + textbubble[j].rstrip() + \"\\n\"\n else:\n melanyeet = [\" \"*41 for _ in range(len(textbubble) - len(melanyeet))] + melanyeet\n for i in range(0, len(textbubble)):\n melanyeet[i] = melanyeet[i][:-1] + textbubble[i].strip() + \"\\n\"\n return \"\".join(melanyeet)\n\n\nif __name__ == \"__main__\":\n textBubble = get_text_bubble(DEFAULT_TEXT) if len(sys.argv) > 1 and sys.argv[1] == \"--brother\" else get_text_bubble()\n melanyeet = get_melanyeet()\n output = combine(textBubble, melanyeet)\n print(output)\n","repo_name":"qwewy/melanyeet","sub_path":"melanyeet.py","file_name":"melanyeet.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"1551364627","text":"from tkinter import StringVar, ttk\nimport tkinter as tk\n\nclass CustomComboBox():\n\n\n def __init__(self, parent, list, index_value):\n\n ddl = []\n self.ddl = ddl\n\n self.ddl_value = None\n\n id = 0\n self.id = id\n\n self.index_value = index_value\n\n self.index_number = None\n\n self.parent = parent\n\n self.list = list\n\n for i in self.list:\n self.ddl.append(i[1])\n\n self.base_layout()\n\n\n def get_value(self, variable):\n self.ddl_value = variable.get()\n\n for i in self.list:\n if(i[1]) == variable.get():\n self.id = i[0]\n\n self.index_number = self.retrieve_index()\n print(self.index_number)\n\n\n def retrieve_index(self):\n index = self.ddl.index(self.index_value)\n return index\n\n def base_layout(self):\n variable = StringVar()\n\n combobox = ttk.Combobox(self.parent, value=self.ddl, textvariable = variable)\n combobox.bind('<>', self.get_value(variable))\n combobox.set('PLEASE SELECT A COMPANY')\n combobox.grid(row=2, column=1, pady=5, padx=15, sticky=tk.E+tk.W)","repo_name":"amanoli11/ams","sub_path":"views/custom_combo_box.py","file_name":"custom_combo_box.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41412231209","text":"# This class will read excel input files, retrieve info from them, and create a CSV file with the new format.\n#\n# https://pandas.pydata.org/pandas-docs/stable/user_guide/dsintro.html\n\nimport column_constants as cc\nimport logging\n\nimport donor_file_reader\nimport lgl_api\n\nSAMPLE_FILE = 'sample_files\\\\benevity.csv'\nlog = logging.getLogger()\n\nGOOD_PAYMENT_STATUS = 'Cleared'\n\n\n# This class will process donations from YourCause.\n# self.input_data is declared by the __init__ module of donor_file_reader. In this module, it will be a list\n# similar to the sample below:\n#\n# [['Id', 'Amount', 'GrossAmount', 'CheckFeeDetails CheckFee', 'CheckFeeDetails PercentWithheld',\n# 'CheckFeeDetails CapApplied', 'Currency', 'IsAch', 'DateCreated', 'PaymentNumber', 'PaymentStatus',\n# 'PaymentStatusDate', 'ExternalSystemTypeName', 'PaymentSubStatus', 'CheckReissueRequestedDate',\n# 'HasCheckReissueRequest', 'CheckReissueStatusId', 'CheckReissueStatusDate', 'CheckReissueRejectionReasonId',\n# 'CheckReissueRejectionReason', 'CheckReissueRejectionComment', 'IsEligibleForCheckReissueRequest',\n# 'PaymentType Id', 'PaymentType Name', 'PaymentType Description', 'ReissuePaymentId', 'ReissuePaymentNumber',\n# 'ProcessingPartnerName'],\n# ['12192042', '650', '650', '0', '', '', 'usd', 'TRUE', '4/6/2022 0:00', '1270221727', 'Cleared',\n# '4/6/2022 6:38', 'CSRconnect', '', '', 'FALSE', '', '', '', '', '', 'FALSE', '3', 'ACH', '', '', '',\n# 'The Blackbaud Giving Fund'],\n# ['12192043', '50', '50', '0', '', '', 'usd', 'TRUE', '4/6/2022 0:00', '1270227583', 'Cleared',\n# '4/6/2022 6:38', 'CSRconnect', '', '', 'FALSE', '', '', '', '', '', 'FALSE', '3', 'ACH', '', '', '',\n# 'The Blackbaud Giving Fund'],\n# ['11342850', '40', '40', '0', '', '', 'usd', 'FALSE', '6/24/2021 0:00', '7200305060', 'Voided',\n# '11/3/2021 23:01', 'CSRconnect', '', '10/25/2021 16:19', 'TRUE', '30', '10/25/2021 16:19', '', '', '',\n# 'FALSE', '1', 'Check', '', '', '', 'The Blackbaud Giving Fund'],\n# ['11336329', '125', '125', '0', '', '', 'usd', 'FALSE', '6/23/2021 0:00', '4230012430', 'Cleared',\n# '8/10/2021 0:00', 'CSRconnect', '', '', 'FALSE', '', '', '', '', '', 'FALSE', '1', 'Check', '', '', '',\n# 'The Blackbaud Giving Fund'],\n#\n# Note that the Payment Status of most rows is \"Cleared\". The third row's Payment Status in \"Voided\". The\n# Payment Status MUST say \"Cleared\" to be included in the donor data.\n#\nclass DonorFileReaderYourCause(donor_file_reader.DonorFileReader):\n # The initialize_donor_data method will store the donation in a dict called self.donor_data. This is very similar\n # to the process used in DonorFileReaderBenevity. The format of the dict will be:\n #\n # {column_name_1 {0: , 1: , ...}, column_name_2 ...}\n #\n # The input data is in the format:\n # ['Id', 'Amount', 'Gross Amount', ... ] <-- these are the labels\n # ['12192042', '650', '650', ...], <-- this is the data\n # ['12192043', '50', '50', ...]\n #\n # To convert the input data to the final dict, we will:\n # 1. loop through each of the data rows\n # 2. loop through each label\n # 3. take the index of the entire data row. That will be the key of the row data (0, 1, 2, ...)\n # 4. use the index of the column label. 
That will be the index of the row value we are adding.\n #\n # Returns - none\n # Side Effect - the self.data_donor property is populated.\n def initialize_donor_data(self):\n log.debug('Entering')\n # Separate the donor data from everything else (exclude the labels).\n donor_rows = self.input_data[1:]\n\n # Initialize the dict from labels (row 0 of the input_data).\n column_labels = self.input_data[0]\n for label in column_labels:\n self.donor_data[label] = {}\n\n # Add the donor rows to the data.\n payment_status_index = column_labels.index(cc.YC_PAYMENT_STATUS)\n for row in donor_rows: # Start with a row of donor data e.g. ['Liberty Mutual', 'DANIELS TABLE INC', ...]\n if row[payment_status_index] != GOOD_PAYMENT_STATUS:\n continue\n for label in column_labels: # Now get a label e.g. 'Company'\n row_index = donor_rows.index(row)\n label_index = column_labels.index(label)\n self.donor_data[label][row_index] = row[label_index]\n\n # Return the map to be used by map_keys.\n def get_map(self):\n return cc.YC_MAP\n\n # This method overrides the map_fields method in the parent class. In addition to mapping fields based on\n # self.donor_data, it will set the campaign name, payment type, and gift note.\n #\n # Returns - same as parent method\n def map_fields(self):\n log.debug('Entering')\n output_data = super().map_fields()\n output_data[cc.LGL_CAMPAIGN_NAME] = {}\n indexes = output_data[cc.LGL_CONSTITUENT_ID].keys()\n for index in indexes:\n output_data[cc.LGL_CAMPAIGN_NAME][index] = 'General'\n return output_data\n\n # This method will get the LGL IDs based on the name of the constituent.\n #\n # Returns - a dict of LGL IDs. The keys of the dict will be in the format: {0: id_1, 1: id_2, ...}\n def get_lgl_constituent_ids(self):\n log.debug('Entering')\n lgl = lgl_api.LglApi()\n donor_names = self.donor_data[cc.YC_DONOR_FULL_NAME]\n donor_emails = self.donor_data[cc.YC_DONOR_EMAIL_ADDRESS]\n lgl_ids = {}\n names_found = {} # This is to make the loop more efficient by remembering the IDs of names already found.\n for index in donor_names.keys():\n name = donor_names[index]\n email = donor_emails[index]\n # If the name is found names_found, then retrieve the ID from the dict instead of making a call.\n if name in names_found.keys():\n cid = names_found[name]\n else:\n cid = lgl.find_constituent_id(name=name, email=email, file_name=self.input_file)\n lgl_ids[index] = cid\n names_found[name] = cid\n return lgl_ids\n","repo_name":"limeri/daniels_table","sub_path":"donor_update/donor_file_reader_yourcause.py","file_name":"donor_file_reader_yourcause.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"476462193","text":"from bs4 import BeautifulSoup\nimport requests\n\n\nMIN_PAGES = 7\nMAX_PAGES = 250\nMAIN_DOMAIN = f\"https://xe.chotot.com/\"\n\n\nFILE_PATH = r\"/home/duyanh/Documents/VS_WorkSpace/projects/programming_for_DS_v1/data/moto_raw.csv\"\n\n\ndef check_none(info, main_info_text):\n if info:\n return info.text.replace(\" \", \"\")\n return \"Null\"\n\n\ndef scrape_data():\n for i in range(MIN_PAGES, MAX_PAGES):\n mainURL = f\"{MAIN_DOMAIN}/mua-ban-xe-may?page={i}\"\n\n main_text = requests.get(mainURL).text\n soup = BeautifulSoup(main_text, \"lxml\")\n motors = soup.find_all(\n \"li\", class_=\"AdItem_wrapperAdItem__S6qPH AdItem_big__70CJq\")\n\n for motor in motors:\n infoURL = f\"{MAIN_DOMAIN}{motor.a['href']}\"\n\n info_text = requests.get(infoURL).text\n main_info_text = BeautifulSoup(info_text, \"lxml\")\n\n brand = main_info_text.find(\n \"a\", itemprop=\"motorbikebrand\")\n brand = check_none(brand, main_info_text)\n\n reg_year = main_info_text.find(\n \"span\", itemprop=\"regdate\")\n reg_year = check_none(reg_year, main_info_text)\n\n motor_condition = main_info_text.find(\n \"span\", itemprop=\"condition_ad\")\n motor_condition = check_none(motor_condition, main_info_text)\n\n motor_cap = main_info_text.find(\n \"span\", itemprop=\"motorbikecapacity\")\n motor_cap = check_none(motor_cap, main_info_text)\n\n motor_model = main_info_text.find(\n \"a\", itemprop=\"motorbikemodel\")\n motor_model = check_none(motor_model, main_info_text)\n\n km_nums = main_info_text.find(\n \"span\", itemprop=\"mileage_v2\")\n km_nums = check_none(km_nums, main_info_text)\n\n motor_type = main_info_text.find(\n \"span\", itemprop=\"motorbiketype\")\n motor_type = check_none(motor_type, main_info_text)\n\n price = main_info_text.find(\n \"span\", itemprop=\"price\")\n price = check_none(price, main_info_text)\n\n csv_row = f\"{brand},{reg_year},{motor_condition},{motor_cap},{motor_model},{km_nums},{motor_type},{price}\\n\"\n\n print(csv_row)\n print(i)\n # open the file in the write mode\n with open(f'{FILE_PATH}', 'a') as f:\n f.write(csv_row)\n\n\nif __name__ == \"__main__\":\n scrape_data()\n","repo_name":"duyanh213-ml/motocycle_pred","sub_path":"src/_1_data_prep_by_/web_scrape.py","file_name":"web_scrape.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"176247430","text":"from django.shortcuts import render, redirect, get_object_or_404\r\nfrom django.http import HttpResponse\r\nfrom django.utils import timezone\r\nfrom formtools.wizard.views import SessionWizardView, NamedUrlSessionWizardView\r\nfrom django.urls import reverse\r\n\r\nfrom .models import Post\r\nfrom .forms import PostForm\r\n# Create your views here.\r\ndef post_list(request):\r\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\r\n return render(request, 'blog/post_list.html', {'posts':posts})\r\n\r\n\r\ndef post_detail(request, pk):\r\n post = get_object_or_404(Post, pk=pk)\r\n\r\n return render(request, 'blog/post_detail.html', {'post': post})\r\n\r\ndef post_new(request):\r\n if request.method==\"POST\":\r\n form = PostForm(request.POST)\r\n if form.is_valid():\r\n post = form.save(commit=False)\r\n post.author = request.user\r\n post.published_date = timezone.now()\r\n post.save()\r\n return redirect('blog:post_detail', pk= post.pk)\r\n else:\r\n form = PostForm()\r\n return render(request, 'blog/post_edit.html', {'form':form})\r\n\r\ndef post_edit(request, pk):\r\n post = get_object_or_404(Post, pk=pk)\r\n if request.method == \"POST\":\r\n form = PostForm(request.POST, instance=post)\r\n if form.is_valid():\r\n post = form.save(commit=False)\r\n post.author = request.user\r\n post.published_date = timezone.now()\r\n post.save()\r\n return redirect('blog:post_detail', pk= post.pk)\r\n else:\r\n form = PostForm(instance=post)\r\n return render(request, 'blog/post_edit.html', {'form':form}) \r\n\r\n# Create your views here.\r\ndef show_message_form_condition(wizard):\r\n # try to get the cleaned data of step 1\r\n cleaned_data = wizard.get_cleaned_data_for_step('contactdata') or {}\r\n # check if the field ``leave_message`` was checked.\r\n return cleaned_data.get('leave_message', True)\r\n\r\nclass ContactWizard(NamedUrlSessionWizardView):\r\n def get_step_url(self, step):\r\n return reverse(self.url_name, kwargs={'step':step})\r\n\r\n def done(self, form_list, **kwargs):\r\n return render(self.request, 'blog/done.html', {\r\n 'form_data': [form.cleaned_data for form in form_list],\r\n })\r\n","repo_name":"ibaran73/Instrument-Calibrations","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30187676283","text":"import pickle\nimport numpy as np\nfrom collections import deque\nimport tensorflow as tf\ntf.get_logger().setLevel(3)\nfrom tensorflow import keras\n\nfrom datetime import datetime\nfrom keras.models import Sequential\nfrom keras.layers import *\nfrom .turtlesim_env_base import TurtlesimEnvBase\nfrom .dqn_single import DqnSingle\n\nclass DqnMulti(DqnSingle):\n def __init__(self,env:TurtlesimEnvBase,id_prefix='dqnm',seed=42):\n super().__init__(env,id_prefix,seed)\n self.SAVE_MODEL_EVERY = 250\n self.NEXT_SAVE_MODEL = self.SAVE_MODEL_EVERY\n # złożenie dwóch rastrów sytuacji aktualnej i poprzedniej w tensor 5x5x10 wejścia do sieci\n def inp_stack(_,last,cur):\n # fa,fd,fc+1,fp+1 ORAZ fo doklejone na końcu\n inp = np.stack([cur[2],cur[3],cur[4],cur[5],last[2],last[3],last[4],last[5],cur[6],last[6]], axis=-1)\n return inp\n # predykcja nagród łącznych (Q) za sterowania na podst. bieżącej i ostatniej sytuacji\n # wytworzenie modelu - sieci neuronowej\n def make_model(self):\n N=self.env.GRID_RES # rozdzielczość rastra\n M=10 # liczba warstw z inp_stack()\n self.model=Sequential()\n self.model.add(Conv3D(filters=2*M,kernel_size=(2,2,M),activation='relu',input_shape=(N,N,M,1)))\n self.model.add(Permute((1,2,4,3)))\n self.model.add(Conv3D(filters=2*M,kernel_size=(2,2,2*M),activation='relu'))\n self.model.add(Permute((1,2,4,3)))\n self.model.add(Conv3D(filters=2*M,kernel_size=(2,2,2*M),activation='relu'))\n self.model.add(Flatten())\n self.model.add(Dense(64,activation='relu')) # (128)\n self.model.add(Dense(self.CTL_DIM,activation=\"linear\")) # wyjście Q dla każdej z CTL_DIM decyzji\n self.model.compile(loss='mse',optimizer=keras.optimizers.Adam(learning_rate=0.001),metrics=[\"accuracy\"])\n # model z osobną gałęzią dla logiki unikania kolizji\n def train_main(self,save_model=True,save_state=True):\n self.target_model=keras.models.clone_model(self.model) # model pomocniczy (wolnozmienny)\n self.replay_memory=deque(maxlen=self.REPLAY_MEM_SIZE_MAX) # historia kroków\n episode_rewards=np.zeros(self.EPISODES_MAX)*np.nan # historia nagród w epizodach\n epsilon=self.EPS_INIT\n step_cnt=0\n train_cnt=0\n current_states={tname:agent.map for tname,agent in self.env.agents.items()} # aktualne sytuacje\n last_states={tname:agent.map for tname,agent in self.env.agents.items()} # poprzednie stytuacje początkowo takie same\n agent_episode={tname:i for i,tname in enumerate(self.env.agents)} # indeks epizodu przypisany do agenta\n episode_rewards[:len(self.env.agents)]=0 # inicjalizacja nagród za epizody\n episode=len(self.env.agents)-1 # indeks ost. epizodu\n to_restart=set() # agenty do reaktywacji\n while episode self.NEXT_SAVE_MODEL: # zapisuj co 250 epizodów gdy jest ustawiona flaga\n current_timestamp_ms = datetime.now().strftime(\"%d_%m__%H_%M_%S\")\n self.model.save(f\"models/model-M{episode}-{current_timestamp_ms}.tf\", save_format=\"tf\") # zapisz model w formacie h5\n with open(f\"models/model-E{episode}-{current_timestamp_ms}.config\", \"w+\") as config_file:\n config_file.write(self.xid())\n self.NEXT_SAVE_MODEL += self.SAVE_MODEL_EVERY\n\n self.env.reset(to_restart,['random' for i in to_restart]) # inicjalizacja wybranych\n for tname in to_restart: # odczytanie sytuacji\n current_states[tname]=self.env.agents[tname].map # początkowa sytuacja\n last_states[tname]=[i.copy() for i in current_states[tname]] # zaczyna od postoju: poprz. 
stan taki jak obecny\n episode+=1 # dla niego to nowy epizod\n episode_rewards[episode]=0 # inicjalizacja nagród w tym epizodzie\n agent_episode[tname]=episode # przypisanie agenta do epizodu\n to_restart=set()\n controls={} # sterowania poszczególnych agentów\n for tname in self.env.agents: # poruszamy każdym agentem\n if np.random.random()>epsilon: # sterowanie wg reguły albo losowe\n controls[tname]=np.argmax(self.decision(self.model,last_states[tname],current_states[tname]))\n print('o',end='')\n else:\n controls[tname]=np.random.randint(0,self.CTL_DIM) # losowa prędkość pocz. i skręt\n print('.', end='')\n actions={tname:self.ctl2act(control) for tname,control in controls.items()} # wartości sterowań\n scene=self.env.step(actions) # kroki i wyniki symulacji\n for tname,(new_state,reward,done) in scene.items(): # obsługa po kroku dla każdego agenta\n episode_rewards[agent_episode[tname]]+=reward # akumulacja nagrody\n self.replay_memory.append((last_states[tname],current_states[tname],controls[tname],reward,new_state,done))\n step_cnt+=1\n if len(self.replay_memory)>=self.REPLAY_MEM_SIZE_MIN and step_cnt%self.TRAIN_EVERY==0:\n self.do_train() # ucz, gdy zgromadzono dość próbek\n train_cnt+=1\n if train_cnt%self.UPDATE_TARGET_EVERY==0:\n self.target_model.set_weights(self.model.get_weights()) # aktualizuj model pomocniczy\n print('T',end='')\n else:\n print('t',end='')\n if done:\n to_restart.add(tname)\n # print(f'\\n {len(self.replay_memory)} {tname} E{episode} ',end='')\n # print(f'{np.nanmean(episode_rewards.take(range(episode-self.env.MAX_STEPS-1,episode+1),mode=\"wrap\"))/self.env.MAX_STEPS:.2f} ',end='') # śr. nagroda za krok\n last_states[tname] = current_states[tname] # przejście do nowego stanu\n current_states[tname] = new_state # z zapamiętaniem poprzedniego\n if epsilon > self.EPS_MIN: # rosnące p-stwo uczenia na podst. historii\n epsilon*=self.EPS_DECAY\n epsilon=max(self.EPS_MIN,epsilon) # ogranicz malenie eps\n\n","repo_name":"Rafalini/SIU_2023","sub_path":"src/turtlesim/dqn_multi.py","file_name":"dqn_multi.py","file_ext":"py","file_size_in_byte":7525,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"69800246570","text":"from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom LemmaTokenizer import *\nimport numpy as np\n\ndef genFreqArray(inText, outText):\n\treturn np.subtract(inText.mean(axis=0).A[0], outText.mean(axis=0).A[0])\n\ndef genName(freqArray, vocabValue, vocabIndex):\n\ttenBestI = freqArray.argsort()[-5:][::-1]\n\ttenBest = [vocabValue[vocabIndex.index(i)].encode('ascii', 'ignore') for i in tenBestI]\n\tbiWords = [w for w in tenBest if ' ' in w]\n\tbw = []\n\tfor b in biWords:\n\t\tbw.extend(b.split(' '))\n\treturn [w for w in tenBest if w not in bw][:3]\n\ndef useNLP(summaryFolder):\n\tf = open('data/community.txt','r'); toks = f.readlines(); f.close();\n\n\tclassesArray = []; nodes = [];\n\tfor tok in toks:\n\t\tinfos = tok.split(' ')\n\t\tif len(infos) == 2:\n\t\t\tclassesArray.append(int(infos[1]))\n\t\t\tnodes.append(int(infos[0]))\n\n\tclassesArray = np.array(classesArray)\n\n\ttexts = [];\n\tfor node in nodes:\n\t\tf = open(summaryFolder+str(node)+'.txt', \"r\"); texts.append(f.read()); f.close();\n\tcount_vect = CountVectorizer(tokenizer=LemmaTokenizer(), stop_words='english', ngram_range = (1,2), binary=True)\n\ttotalCount = count_vect.fit_transform(texts)\n\n\ttfidf_trans = TfidfTransformer() #initialize our tfidf transformer\n\ttotalCount = tfidf_trans.fit_transform(totalCount)\n\n\ttotalCount = totalCount.asfptype()\n\n\tvocabValue = count_vect.vocabulary_.keys()\n\tvocabIndex = count_vect.vocabulary_.values()\n\n\tchanged = totalCount.shape[0]\n\troun = 1\n\twhile 1.0*changed > 0.01*totalCount.shape[0]:\n\t\tclassSet = list(set(classesArray))\n\t\tfreqMatrix = []\n\n\t\tfor clas in classSet:\n\t\t\tgoodRows = np.where(classesArray==clas)[0]\n\t\t\tbadRows = np.where(classesArray!=clas)[0]\n\n\t\t\tfreqArray = genFreqArray(totalCount[goodRows, :], totalCount[badRows, :])\n\t\t\tfreqMatrix.append(freqArray) #generating a very cool looking matrix\n\n\t\tallResults = freqMatrix * totalCount.transpose() #I think this is as yolo as it gets: row = each cluster, column = each article's score\n\n\t\tnClassesArray = [classSet[np.argmax(allResults[:,i])] for i in range(0,totalCount.shape[0])]\n\t\tchanged = np.count_nonzero(nClassesArray-classesArray)\n\t\tclassesArray = np.array(nClassesArray)\n\t\troun += 1\n\n\tf = open('data/clusters.txt','w')\n\tfor clas in classSet:\n\t\tgoodRows = np.where(classesArray==clas)[0]\n\t\tbadRows = np.where(classesArray!=clas)[0]\n\t\tfreqArray = genFreqArray(totalCount[goodRows, :], totalCount[badRows, :])\n\t\tscore = 10*float(len(goodRows))/len(classesArray)\n\t\tf.write(str(clas)+'[]'+str(score)+'[]noName[]'+str(len(goodRows))+'\\n')\n\tf.close()\n\n\tf = open('data/community.txt','w')\n\tfor node, member in zip(nodes, classesArray):\n\t\tf.write(str(node)+' '+str(member)+'\\n')\n\tf.close()","repo_name":"tingofurro/wikigraph","sub_path":"clusterer/canlp/s3Nlp.py","file_name":"s3Nlp.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22212531127","text":"class Solution:\n def findLength(self, nums1: List[int], nums2: List[int]) -> int:\n m = len(nums1)\n n = len(nums2)\n max_count = 0\n # i from -4 to 4. so i+j value wil tell which ith and jth ele will overlap and\n #for each ith value we get diff jth value which in i+j < 0 that gives positive value of j,\n for i in range(-n+1,m):\n count = 0\n for j in range(n):\n if i+j<0:\n continue\n elif i+j >=m:\n break\n elif nums1[i+j] == nums2[j]:\n count +=1\n max_count = max(max_count,count)\n else:\n count=0\n return max_count","repo_name":"AyushSingh-github/Leetcode","sub_path":"718-maximum-length-of-repeated-subarray/718-maximum-length-of-repeated-subarray.py","file_name":"718-maximum-length-of-repeated-subarray.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"30495982622","text":"import numpy as np\nfrom numpy.linalg import cholesky\nfrom sklearn import neighbors \nfrom sklearn.metrics import precision_recall_curve \nfrom sklearn.metrics import classification_report \nfrom sklearn.cross_validation import train_test_split \nimport matplotlib.pyplot as plt\n\nsampleN1=11000\nmu1 = np.array([[0, 0]])\nSigma1 = np.array([[1, 0], [0, 1]])\nR1 = cholesky(Sigma1)\ns1 = np.dot(np.random.randn(sampleN1, 2), R1) + mu1\n\nsampleN2=11000\nmu2 = np.array([[3, 3]])\nSigma2 = np.array([[2, 0], [0, 2.5]])\nR2 = cholesky(Sigma2)\ns2 = np.dot(np.random.randn(sampleN2, 2), R2) + mu2\n\n#ss1=np.hstack((s1[1000:],[[0]]*10000))\n#ss2=np.hstack((s2[1000:],[[1]]*10000))\n\nx=np.vstack((s1[1000:],s2[1000:]))\ny=[0]*10000+[1]*10000\n#y=np.vstack(([[0]]*10000,[[1]]*10000))\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2) \n\nh = .01 \nx_min, x_max = x[:, 0].min() - 0.1, x[:, 0].max() + 0.1 \ny_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1 \nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), \n np.arange(y_min, y_max, h))\n#k value\nclf = neighbors.KNeighborsClassifier(n_neighbors=1) \nclf.fit(x_train, y_train)\n\nprecision, recall, thresholds = precision_recall_curve(y_train, clf.predict(x_train)) \nanswer = clf.predict_proba(x)[:,1] \nprint(classification_report(y, answer, target_names = ['Gauss1', 'Gauss2'])) \n\nanswer = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1] \nz = answer.reshape(xx.shape) \nplt.contourf(xx, yy, z, cmap=plt.cm.Paired, alpha=0.8) \nplt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap=plt.cm.Paired) \nplt.show() \n\n#random.shuffle(list) \n#noise\n","repo_name":"weilong0127/ML-group","sub_path":"L/123.py","file_name":"123.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"8593340824","text":"import matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nfrom multiprocessing import *\nimport time\nimport os\nimport pickle\nimport numpy as np\nimport serial\nimport threading\nimport pandas as pd\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\ncollection = []\n'''\nGeneral description:\n\nRealTimSigAni class is to seperate the graph into 3 chunks. and take the chunk with the left red line between the two yellow lines\nin the middle as the current chunk to calculate the speed.\nand when it receives one data(float or int) it updates its graph\n\nFor rest of the code\n\nBasically, you should establish two virtual series(in ubuntu I use $socat -d -d pty pty to generate it) called input one and output one and build connections between them.\nTake the input one as a process, and output one as a another, send the neural data using pickle from input one to the output\none so that you could get the python subject.\n\n'''\n\n\nclass RealTimeSigAni(object):\n\n def __init__(self, fig, ax, time_resolution, index_interval, ymax):\n '''\n Initializing a basic framework of this animation\n Parameters:\n -----------------------------------------------------\n --time_resolution: resolution of the data\n --index_interval: the interval for extracting information\n '''\n self.signal_array = []\n self.time_resolution = time_resolution\n self.window_range = ([0, 3 * index_interval])\n self.window_length = 3 * index_interval\n self.index_interval = index_interval\n self.fig = fig\n self.ax = ax\n self.speed = 0\n self.speed_change_flag = False\n # set the general graph of the signal window\n self.ax.set_xlim(self.window_range)\n self.ax.set_ylim([0, ymax])\n self.ax.invert_xaxis()\n # draw a red window for the processed information\n self.ax.axvline(index_interval, color='red')\n self.ax.axvline(2 * index_interval, color='red')\n # draw the signal curve to manifest the signal\n self.signal_curve, = ax.plot([], [], 'b-')\n self.ax.set_ylabel('firing amplitude')\n self.ax.set_xlabel('time')\n self.ax.set_xticks([index_interval, 2 * index_interval])\n self.speed_display = self.ax.text(0.9, 0.9, 'Speed:0', horizontalalignment='center', verticalalignment='center',\n transform=ax.transAxes)\n # use three yellow lines to distringuish different chunks\n self.line1 = self.ax.axvline(0, color='yellow')\n self.line2 = self.ax.axvline(0, color='yellow')\n self.line3 = self.ax.axvline(0, color='yellow')\n self.line_collection = []\n\n def __call__(self, data):\n # every time a data reaches, update the grpah by phase 1 or phase 2,because they got different\n self.signal_array.append(data)\n if len(self.signal_array) <= self.window_range[1]:\n return self.phase_1_plot()\n else:\n if len(self.signal_array) > 8 * self.index_interval:\n self.signal_array = self.signal_array[-(self.window_length + 5):]\n # here plus 5 is to avoid to fall in condition (2 lines above)\n return self.phase_2_plot()\n\n def phase_1_plot(self, speed_method=np.sum):\n # undone:change ylim\n length = len(self.signal_array)\n # print(self.signal_array)\n self.signal_curve.set_xdata(np.arange(1, 1 + length))\n self.signal_curve.set_ydata(self.signal_array[::-1])\n self.speed_change_flag = False\n if length == 1:\n self.line_collection.append(self.line1)\n\n if length == self.index_interval:\n self.speed = speed_method(\n self.signal_array[-self.index_interval:])\n self.speed_display._text = 'Speed {0}'.format(self.speed)\n self.speed_change_flag = True\n\n 
if length == self.index_interval + 1:\n self.line_collection.append(self.line2)\n\n if length == 2 * self.index_interval:\n self.speed = speed_method(\n self.signal_array[-self.index_interval:])\n self.speed_display._text = 'Speed {0}'.format(self.speed)\n self.speed_change_flag = True\n if length == 2 * self.index_interval + 1:\n self.line_collection.append(self.line3)\n\n for line in self.line_collection:\n line.set_xdata(line._x[1] + 1)\n self.ax.draw_artist(line)\n self.ax.draw_artist(self.signal_curve)\n self.ax.autoscale_view()\n self.fig.canvas.flush_events()\n return self.speed, self.speed_change_flag\n\n def phase_2_plot(self, speed_method=np.sum):\n self.speed_change_flag = False\n for line in self.line_collection:\n if line._x[1] == self.window_range[1]:\n line.set_xdata(1)\n self.speed = speed_method(self.signal_array[-self.index_interval:])\n self.speed_display._text = 'Speed {0}'.format(self.speed)\n self.ax.draw_artist(self.speed_display)\n self.ax.draw_artist(line)\n self.speed_change_flag = True\n else:\n line.set_xdata(line._x[1] + 1)\n self.ax.draw_artist(line)\n self.signal_curve.set_ydata((self.signal_array[-self.window_range[1]:])[::-1])\n self.ax.draw_artist(self.signal_curve)\n self.ax.autoscale_view()\n self.fig.canvas.flush_events()\n return self.speed, self.speed_change_flag\n\n\ndef Transmit(ser, neural_data, slow_ratio, recording_start=0, time_resolution=0.001):\n current_spiking = recording_start\n start_time = time.time()\n time_interval = time_resolution * slow_ratio\n while (True):\n pickle.dump(neural_data[current_spiking, :], ser)\n current_spiking += 1\n time.sleep(time_interval - ((time.time() - start_time) % time_interval))\n\n\ndef Output(ser, slow_ratio, time_resolution, index_interval, ymax):\n file_name = 'robot.txt'\n file_robot = open(file_name, 'w')\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n plt.ion()\n animation = RealTimeSigAni(fig, ax, 0.1, index_interval, ymax)\n fig.canvas.draw()\n fig.show()\n collection = []\n start_time = time.time()\n time_interval = slow_ratio * time_resolution\n while (True):\n spiking = pickle.load(ser)\n speed, speed_change_flag = animation(sum(spiking))\n # multiprocessing went wrong here, I think it's the problem of matplotlib and multiprocessing and threads faila on the multiprocessing modele.\n # animation_thread = Process(target=animation, args=(sum(spiking),))\n # animation_thread.start()\n if speed_change_flag:\n thread_robot = threading.Thread(target=file_robot.write, args=('the speed is {0}\\n'.format(speed),))\n thread_robot.start()\n time.sleep(time_interval - ((time.time() - start_time) % time_interval))\n\n\nif __name__ == '__main__':\n try:\n with open('../data/dish_5_experiment_37_100000-110000ms.obj','rb') as f:\n neural_data = pickle.load(f)\n neural_data = neural_data.as_matrix()\n time_resolution = 0.001\n index_interval = 100 # The datapoints in each chunk\n slow_ratio = 20 # the times that we animate our signals\n recording_start = 0 # the correspondent time points that we choose to animate our dataset\n ymax = 700 # the max of the yaxis inploting\n ser_in = serial.Serial('/dev/pts/22', write_timeout=100) # these two is the virtual series\n ser_out = serial.Serial('/dev/pts/23', baudrate=19200)\n\n pro_in = Process(target=Transmit, args=(ser_in, neural_data, slow_ratio, recording_start, time_resolution,))\n pro_out = Process(target=Output, args=(ser_out, slow_ratio, time_resolution, index_interval, ymax,))\n pro_in.start()\n pro_out.start()\n\n except KeyboardInterrupt:\n 
ser_in.close()\n ser_out.close()\n # file_handle.close()\n","repo_name":"crcali/BrainBot","sub_path":"AnimationCode/real_time_signal_animation.py","file_name":"real_time_signal_animation.py","file_ext":"py","file_size_in_byte":8045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"73412513448","text":"import grpc\nimport demo_pb2_grpc\nimport demo_pb2\n\n\ndef run():\n with grpc.insecure_channel('127.0.0.1:8888') as channel:\n stub = demo_pb2_grpc.DemoStub(channel)\n\n work = demo_pb2.Work()\n work.num1 = 100\n work.num2 = 60\n work.op = demo_pb2.Work.ADD\n\n ret = stub.Calculate(work)\n # ret -> Result\n print(ret.val)\n\n\nif __name__ == '__main__':\n run()","repo_name":"monster6699/monster-web-serve","sub_path":"common/rpc/demo/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"3227643615","text":"import argparse\nimport json\nimport logging\nimport time\n\nfrom sanic import Sanic\nfrom sanic.exceptions import InvalidUsage\nfrom sanic.response import json\n\nfrom tipsi_tools.tipsi_logging import setup_logger\n\nfrom service.functions import ackermann, factorial, fibonacci\n\napp = Sanic()\nsetup_logger('interview_service')\nlog = logging.getLogger('service')\n\n\ndef validate_int(n, _min, _max):\n return _max >= n >= _min\n\n\ndef validate_params(*args):\n for params in args:\n if not validate_int(*params):\n nums = ', '.join([str(x[0]) for x in args])\n log.warning(f'Param is out of range: {nums}')\n raise InvalidUsage(json.dumps({'error': f'invalid number: {params[0]}'}))\n\n\ndef generic_response(what):\n \"\"\"\n we're converting int's to string due json limitations for int values\n \"\"\"\n return json({'response': str(what)})\n\n\n@app.middleware('request')\ndef before_request(request):\n request.headers['start_time'] = time.time()\n\n\n@app.middleware('response')\ndef after_request(request, response):\n total_time = time.time() - request.headers['start_time']\n extra = {'total_time': total_time}\n log.info(f'Request served {total_time:.4}', extra=extra)\n\n\n@app.route(\"/fibonacci//\")\nasync def fibonacci_view(request, n):\n \"\"\"\n We're limiting n to 400 to not deal with stack things.\n In general, because of caching we can do bigger numbers in several attempts\n or increase by calling sys.setrecursionlimit\n \"\"\"\n validate_params([n, 0, 400])\n log.debug(f'Calculate fibonacci for {n}')\n return generic_response(fibonacci(n))\n\n\n@app.route(\"/ackermann///\")\nasync def ackermann_view(request, m, n):\n \"\"\"\n We don't want to deal with deeply nested recursion stack for this particular task\n So we're doing quite restricted subset of caluclations: 0 <= m <= 3 and 0 <= n <= 4\n \"\"\"\n validate_params([m, 0, 3], [n, 0, 4])\n log.debug(f'Calculate ackermann for {m}, {n}')\n return generic_response(ackermann(m, n))\n\n\n@app.route(\"/factorial//\")\nasync def factorial_view(request, n):\n \"\"\"\n We're limiting values to 0 <= n <= 400 to make response time acceptable\n \"\"\"\n validate_params([n, 0, 400])\n log.debug(f'Calculate factorial for {n}')\n return generic_response(factorial(n))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', type=int, default=80)\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n app.run(host=\"0.0.0.0\", port=args.port)\n","repo_name":"cybergrind/python_interview_bootstrap","sub_path":"service/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22664485990","text":"import json\nfrom pathlib import Path\nfrom typing import Any, List\n\nimport pkg_resources\n\n\ndef descend(root: Path, folder: Path):\n for item in folder.iterdir():\n if item.is_file():\n if item.suffix == '.json':\n filename = str(item)\n with open(filename, \"rt\") as fp:\n recipe = json.load(fp)\n ascii_filename = item.parts[-1].lower().encode(\n 'ascii', 'ignore'\n ).decode(\n 'ascii'\n ).replace(\n '_', ''\n ).replace(\n ' ', '-'\n ).replace(\n '--', '-'\n ).replace(\n ',', '-and'\n ).replace(\n '--', '-'\n )\n elements = []\n cuisine = recipe.get('recipeCuisine')\n if cuisine:\n elements.append(cuisine.lower())\n keywords = recipe.get('keywords')\n if keywords:\n elements += [\n keyword.lower().replace(' ', '-')\n for keyword in keywords\n ]\n new_folder = root.joinpath(*elements)\n if not new_folder.exists():\n new_folder.mkdir(parents=True)\n new_item = new_folder.joinpath(ascii_filename)\n item.rename(new_item)\n print(new_item)\n elif item.is_dir():\n descend(root, item)\n else:\n raise RuntimeError(\"Unknown type\")\n\n\nif __name__ == \"__main__\":\n root = Path(\"Recipes\")\n descend(root, root)\n","repo_name":"rob-blackbourn/scratch-python","sub_path":"recipes/rename_snakecase.py","file_name":"rename_snakecase.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7286896592","text":"import sys\r\nsys.path.append('Users/paulk/Documents/pol/programming/factor.py')\r\nsys.path.append('Users/paulk/Documents/pol/programming/check.py')\r\n\r\nimport factor\r\nimport check\r\n\r\n# not best approach\r\n# see from minute 9:30\r\n# Corey Schafer. (2017). Python Tutorial for Beginners 9. Found at:\r\n# https://www.youtube.com/watch?v=CqvZ3vGoGs0\r\n\r\n\r\nup = 17\r\nprint('Up is', up)\r\n\r\nfor i in range(1,200):\r\n if i == up:\r\n print('Up!!!!')\r\n elif factor.multiple(i, up):\r\n print('Up!!!!')\r\n elif check.contains(i, up):\r\n #elif str(up) in str(i):\r\n print('Up!!!!')\r\n else:\r\n print(i)\r\n\r\n \r\n","repo_name":"paulkevorkov/Up-drinking-game","sub_path":"mainUp.py","file_name":"mainUp.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17888089817","text":"from .basesorting import BaseSorting, BaseSortingSegment\n\nfrom pathlib import Path\nimport numpy as np\n\nfrom .core_tools import define_function_from_class\n\n\nclass NpzSortingExtractor(BaseSorting):\n \"\"\"\n Dead simple and super light format based on the NPZ numpy format.\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.savez.html#numpy.savez\n\n It is in fact an archive of several .npy format.\n All spike are store in two columns maner index+labels\n \"\"\"\n\n extractor_name = \"NpzSortingExtractor\"\n mode = \"file\"\n name = \"npz\"\n\n def __init__(self, file_path):\n self.npz_filename = file_path\n\n npz = np.load(file_path)\n num_segment = int(npz[\"num_segment\"][0])\n unit_ids = npz[\"unit_ids\"]\n sampling_frequency = float(npz[\"sampling_frequency\"][0])\n\n BaseSorting.__init__(self, sampling_frequency, unit_ids)\n\n for seg_index in range(num_segment):\n spike_indexes = npz[f\"spike_indexes_seg{seg_index}\"]\n spike_labels = npz[f\"spike_labels_seg{seg_index}\"]\n sorting_segment = NpzSortingSegment(spike_indexes, spike_labels)\n self.add_sorting_segment(sorting_segment)\n\n self._kwargs = {\"file_path\": str(Path(file_path).absolute())}\n\n @staticmethod\n def write_sorting(sorting, save_path):\n d = {}\n units_ids = np.array(sorting.get_unit_ids())\n d[\"unit_ids\"] = units_ids\n d[\"num_segment\"] = np.array([sorting.get_num_segments()], dtype=\"int64\")\n d[\"sampling_frequency\"] = np.array([sorting.get_sampling_frequency()], dtype=\"float64\")\n\n for seg_index in range(sorting.get_num_segments()):\n spike_indexes = []\n spike_labels = []\n for unit_id in units_ids:\n sp_ind = sorting.get_unit_spike_train(unit_id, segment_index=seg_index)\n spike_indexes.append(sp_ind.astype(\"int64\"))\n # spike_labels.append(np.ones(sp_ind.size, dtype='int64')*unit_id)\n spike_labels.append(np.array([unit_id] * sp_ind.size))\n\n # order times\n if len(spike_indexes) > 0:\n spike_indexes = np.concatenate(spike_indexes)\n spike_labels = np.concatenate(spike_labels)\n order = np.argsort(spike_indexes)\n spike_indexes = spike_indexes[order]\n spike_labels = spike_labels[order]\n else:\n spike_indexes = np.array([], dtype=\"int64\")\n spike_labels = np.array([], dtype=\"int64\")\n d[f\"spike_indexes_seg{seg_index}\"] = spike_indexes\n d[f\"spike_labels_seg{seg_index}\"] = spike_labels\n\n np.savez(save_path, **d)\n\n\nclass NpzSortingSegment(BaseSortingSegment):\n def __init__(self, spike_indexes, spike_labels):\n BaseSortingSegment.__init__(self)\n\n self.spike_indexes = spike_indexes\n self.spike_labels = spike_labels\n\n def get_unit_spike_train(self, unit_id, start_frame, end_frame):\n spike_times = self.spike_indexes[self.spike_labels == unit_id]\n if start_frame is not None:\n spike_times = spike_times[spike_times >= start_frame]\n if end_frame is not None:\n spike_times = spike_times[spike_times < end_frame]\n return spike_times.astype(\"int64\")\n\n\nread_npz_sorting = define_function_from_class(source_class=NpzSortingExtractor, name=\"read_npz_sorting\")\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/core/npzsortingextractor.py","file_name":"npzsortingextractor.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"}
+{"seq_id":"19813207040","text":"import contextlib\nimport socket\nfrom argparse import ArgumentParser\nfrom builtins import range, str\n\nimport ipaddr\nfrom facebook.network.Address.ttypes import BinaryAddress\nfrom neteng.fboss.ctrl import FbossCtrl\nfrom neteng.fboss.ctrl.ttypes import IpPrefix, UnicastRoute\nfrom neteng.fboss.qsfp import QsfpService\nfrom thrift.protocol import TBinaryProtocol\nfrom thrift.transport import TSocket\n\n\n\"\"\"Add, change, or delete a route on FBOSS controller\n\"\"\"\n\n\nDEFAULT_CLIENTID = 1\n\n\ndef parse_prefix(args):\n network = ipaddr.IPNetwork(args.prefix)\n return IpPrefix(\n ip=BinaryAddress(addr=network.ip.packed), prefixLength=network.prefixlen\n )\n\n\ndef parse_nexthops(args):\n return [BinaryAddress(addr=ipaddr.IPAddress(nh).packed) for nh in args.nexthop]\n\n\ndef flush_routes(args):\n with get_client(args) as client:\n client.syncFib(args.client, [])\n\n\ndef add_route(args):\n prefix = parse_prefix(args)\n nexthops = parse_nexthops(args)\n with get_client(args) as client:\n client.addUnicastRoutes(\n args.client, [UnicastRoute(dest=prefix, nextHopAddrs=nexthops)]\n )\n\n\ndef del_route(args):\n prefix = parse_prefix(args)\n with get_client(args) as client:\n client.deleteUnicastRoutes(args.client, [prefix])\n\n\ndef list_intf(args):\n with get_client(args) as client:\n # for intf in client.getInterfaceList():\n for (\n idx,\n intf,\n ) in client.getAllInterfaces().items(): # noqa: B301 T25377293 Grandfathered in\n print(\"L3 Interface %d: %s\" % (idx, format_interface(intf)))\n\n\ndef format_ip(ip):\n family = socket.AF_INET if len(ip.addr) == 4 else socket.AF_INET6\n return socket.inet_ntop(family, ip.addr)\n\n\ndef format_route(route):\n next_hops = \", \".join(format_ip(ip) for ip in route.nextHopAddrs)\n return \"%s --> %s\" % (format_prefix(route.dest), next_hops)\n\n\ndef format_prefix(prefix):\n return \"%s/%d\" % (format_ip(prefix.ip), prefix.prefixLength)\n\n\ndef format_interface(intf):\n return \"%s (%s)\" % (\", \".join(format_prefix(i) for i in intf.address), intf.mac)\n\n\ndef format_arp(arp):\n return \"%s -> %s\" % (format_ip(arp.ip), arp.mac)\n\n\ndef list_routes(args):\n with get_client(args) as client:\n for route in client.getRouteTable():\n print(\"Route %s\" % format_route(route))\n\n\ndef list_optics(args):\n with get_qsfp_client(args) as client:\n info = client.getTransceiverInfo()\n for key, val in info.items(): # noqa: B301 T25377293 Grandfathered in\n print(\"Optic %d: %s\" % (key, str(val)))\n\n\ndef list_ports(args):\n details = args.details\n with get_client(args) as client:\n for idx, intf in client.getPortStatus(\n list(range(1, 65))\n ).items(): # noqa: B301 T25377293 Grandfathered in\n stats = \"\"\n if details:\n stats = \" (%s)\" % client.getPortStats(idx)\n print(\n \"Port %d: [enabled=%s, up=%s, present=%s]%s\"\n % (idx, intf.enabled, intf.up, intf.present, stats)\n )\n\n\ndef list_arps(args):\n with get_client(args) as client:\n for arp in client.getArpTable():\n print(\"Arp: %s\" % (format_arp(arp)))\n\n\ndef list_ndps(args):\n with get_client(args) as client:\n for ndp in client.getNdpTable():\n print(\"NDP: %s\" % (format_arp(ndp)))\n\n\ndef list_vlans(args):\n with get_client(args) as client:\n # for intf in client.getInterfaceList():\n vlans = {}\n for _idx, intf in client.getAllInterfaces().items():\n vlans[intf.vlanId] = True\n for vlan in vlans:\n print(\"===== Vlan %d ==== \" % vlan)\n for address in client.getVlanAddresses(vlan):\n print(address.addr)\n\n\ndef enable_port(args):\n port = 
args.en_port\n with get_client(args) as client:\n portnum = int(port)\n client.setPortState(portnum, True)\n print(\"Port %d enabled\" % portnum)\n\n\ndef disable_port(args):\n port = args.dis_port\n with get_client(args) as client:\n portnum = int(port)\n client.setPortState(portnum, False)\n print(\"Port %d disabled\" % portnum)\n\n\n@contextlib.contextmanager\ndef get_client(args, timeout=5.0):\n sock = TSocket.TSocket(args.host, args.port)\n sock.setTimeout(timeout * 1000) # thrift timeout is in ms\n protocol = TBinaryProtocol.TBinaryProtocol(sock)\n transport = protocol.trans\n transport.open()\n client = FbossCtrl.Client(protocol)\n yield client\n transport.close()\n\n\n@contextlib.contextmanager\ndef get_qsfp_client(args, timeout=5.0):\n sock = TSocket.TSocket(args.host, args.port)\n sock.setTimeout(timeout * 1000) # thrift timeout is in ms\n protocol = TBinaryProtocol.TBinaryProtocol(sock)\n transport = protocol.trans\n transport.open()\n client = QsfpService.Client(protocol)\n yield client\n transport.close()\n\n\ndef main() -> None:\n ap = ArgumentParser()\n ap.add_argument(\n \"--port\", \"-p\", type=int, default=5909, help=\"the controller thrift port\"\n )\n ap.add_argument(\n \"--client\",\n \"-c\",\n type=int,\n default=DEFAULT_CLIENTID,\n help=\"the client ID used to manipulate the routes\",\n )\n ap.add_argument(\"--host\", help=\"the controller hostname\", default=\"localhost\")\n subparsers = ap.add_subparsers()\n\n flush_parser = subparsers.add_parser(\n \"flush\", help=\"flush all existing non-interface routes\"\n )\n flush_parser.set_defaults(func=flush_routes)\n\n add_parser = subparsers.add_parser(\n \"add\", help=\"add a new route or change an existing route\"\n )\n add_parser.set_defaults(func=add_route)\n add_parser.add_argument(\n \"prefix\", help='the route prefix, i.e. \"1.1.1.0/24\" or \"2001::0/64\"'\n )\n add_parser.add_argument(\n \"nexthop\",\n nargs=\"+\",\n help='the nexthops of the route, i.e \"10.1.1.1\" or \"2002::1\"',\n )\n\n del_parser = subparsers.add_parser(\"delete\", help=\"delete an existing route\")\n del_parser.set_defaults(func=del_route)\n del_parser.add_argument(\n \"prefix\", help='The route prefix, i.e. 
\"1.1.1.0/24\" or \"2001::0/64\"'\n )\n\n list_parser = subparsers.add_parser(\"list_intf\", help=\"list switch interfaces\")\n list_parser.set_defaults(func=list_intf)\n list_parser.add_argument(\n \"--details\",\n action=\"store_true\",\n help=\"List all information about the interface\",\n default=False,\n )\n\n list_route_parser = subparsers.add_parser(\"list_routes\", help=\"list switch routes\")\n list_route_parser.set_defaults(func=list_routes)\n list_route_parser.add_argument(\n \"--details\",\n action=\"store_true\",\n help=\"List all information about the routes\",\n default=False,\n )\n\n list_optic_parser = subparsers.add_parser(\"list_optics\", help=\"list switch optics\")\n list_optic_parser.set_defaults(func=list_optics)\n list_optic_parser.add_argument(\n \"--details\",\n action=\"store_true\",\n help=\"List all information about the optics\",\n default=False,\n )\n\n list_port_parser = subparsers.add_parser(\"list_ports\", help=\"list switch ports\")\n list_port_parser.set_defaults(func=list_ports)\n list_port_parser.add_argument(\n \"--details\",\n action=\"store_true\",\n help=\"List all information about the ports\",\n default=False,\n )\n\n list_vlan_parser = subparsers.add_parser(\"list_vlans\", help=\"list switch vlans\")\n list_vlan_parser.set_defaults(func=list_vlans)\n list_vlan_parser.add_argument(\n \"--details\",\n action=\"store_true\",\n help=\"List all information about the vlans\",\n default=False,\n )\n\n list_arp_parser = subparsers.add_parser(\"list_arps\", help=\"list switch arps\")\n list_arp_parser.set_defaults(func=list_arps)\n list_arp_parser.add_argument(\n \"--details\",\n action=\"store_true\",\n help=\"List all information about the arps\",\n default=False,\n )\n\n list_ndp_parser = subparsers.add_parser(\"list_ndps\", help=\"list switch ndps\")\n list_ndp_parser.set_defaults(func=list_ndps)\n list_ndp_parser.add_argument(\n \"--details\",\n action=\"store_true\",\n help=\"List all information about the ndps\",\n default=False,\n )\n\n enable_port_parser = subparsers.add_parser(\"enable_port\", help=\"Enable port\")\n enable_port_parser.set_defaults(func=enable_port)\n enable_port_parser.add_argument(\"en_port\", help=\"Port to enable\")\n\n disable_port_parser = subparsers.add_parser(\"disable_port\", help=\"Enable port\")\n disable_port_parser.set_defaults(func=disable_port)\n disable_port_parser.add_argument(\"dis_port\", action=\"store\", help=\"Port to disable\")\n\n args = ap.parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"facebook/fboss","sub_path":"fboss/agent/tools/fboss_route.py","file_name":"fboss_route.py","file_ext":"py","file_size_in_byte":8802,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"53"}
+{"seq_id":"7177644889","text":"class Solution:\n def rotate(self, nums: List[int], k: int) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n def inverse(i, j):\n while i < j:\n nums[i], nums[j] = nums[j], nums[i]\n i, j = i + 1, j - 1\n \n n = len(nums)\n k = k % n\n inverse(0, n-1)\n inverse(0, k-1)\n inverse(k, n-1)\n return nums","repo_name":"mmkvdev/leetcode","sub_path":"October/Week3/Day1/rotateArray.py","file_name":"rotateArray.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"43814640580","text":"import urllib\n\nfrom gi.repository import Gtk, Gdk, GObject\nfrom urllib.parse import urlparse\n\nfrom ..core.p2p import P2PClient\nfrom ..core.events import EventHandler\n\ndef show_msg_box(parent, title, message, buttons=Gtk.ButtonsType.OK_CANCEL, type=Gtk.MessageType.WARNING):\n \"\"\" Show message box\n :param parent: Parent window\n :param title: Title\n :param message: Content\n :param buttons: Buttons\n :param type: Icon type\n :return: MessageBox value\n \"\"\"\n dialog = Gtk.MessageDialog(parent, 0, type, buttons, title)\n dialog.format_secondary_text(message)\n response = dialog.run()\n dialog.destroy()\n return response == Gtk.ResponseType.OK\n\nclass FileList(list):\n def __init__(self):\n list.__init__(self)\n self.total_size = 0\n self.total_files = 0\n\n def append(self, files):\n self.total_size += files[1]\n self.total_files += len(files[0])\n list.append(self, files)\n\nclass AppWindow(Gtk.Window, EventHandler):\n def __init__(self):\n super(AppWindow, self).__init__(title=\"FileTransfer\")\n\n self.file_list = FileList()\n\n self.set_resizable(False)\n self.set_size_request(400, 300)\n self.set_border_width(6)\n\n self.__configure_drag_drop()\n self.__create_layout()\n\n self.connect(\"delete-event\", Gtk.main_quit)\n self.show_all()\n\n self.p2p = P2PClient(self)\n Gtk.main()\n\n # UI\n def __create_client_list_panel(self):\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_size_request(150, -1)\n\n self.clients = Gtk.ListStore(str, bool)\n clients_view = Gtk.TreeView(self.clients)\n def cell_toggled(cell_renderer, path):\n self.clients[path][1] = not self.clients[path][1]\n\n toggle_renderer = Gtk.CellRendererToggle()\n toggle_renderer.connect(\"toggled\", cell_toggled)\n clients_view.append_column(Gtk.TreeViewColumn(\"\", toggle_renderer, active=1))\n clients_view.append_column(Gtk.TreeViewColumn(\"Device\", Gtk.CellRendererText(), text=0))\n\n scrolled_window.add(clients_view)\n return scrolled_window\n\n def __create_file_list_panel(self):\n v_box = Gtk.VBox(spacing=6)\n\n # List of files\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n\n self.files = Gtk.ListStore(str, str, str)\n files_view = Gtk.TreeView(self.files)\n files_view.set_size_request(-1, 128)\n files_view.append_column(Gtk.TreeViewColumn(\"Size\", Gtk.CellRendererText(), text=0))\n files_view.append_column(Gtk.TreeViewColumn(\"File\", Gtk.CellRendererText(), text=1))\n files_view.append_column(Gtk.TreeViewColumn(\"Directory\", Gtk.CellRendererText(), text=2))\n\n scrolled_window.add(files_view)\n v_box.pack_start(scrolled_window, True, True, 0)\n\n # Files info info bar\n h_box = Gtk.HBox(spacing=6)\n v_box.pack_start(h_box, False, True, 0)\n\n self.total_size_label = Gtk.Label(\"\", xalign=0.0)\n h_box.pack_start(self.total_size_label, True, True, 0)\n self.total_files_label = Gtk.Label(\"\", xalign=0.0)\n h_box.pack_start(self.total_files_label, False, True, 0)\n\n # Toolbar\n toolbar = Gtk.Toolbar()\n toolbar.set_orientation(Gtk.Orientation.HORIZONTAL)\n toolbar.set_icon_size(Gtk.IconSize.LARGE_TOOLBAR)\n toolbar.set_style(Gtk.ToolbarStyle.BOTH)\n v_box.pack_start(toolbar, False, True, 0)\n\n toolbar.insert(Gtk.ToolButton(Gtk.STOCK_OPEN), -1)\n toolbar.insert(Gtk.ToolButton(Gtk.STOCK_SAVE), -1)\n toolbar.insert(Gtk.SeparatorToolItem(), -1)\n toolbar.insert(Gtk.ToolButton(Gtk.STOCK_ADD), -1)\n 
toolbar.insert(Gtk.ToolButton(Gtk.STOCK_DELETE), -1)\n toolbar.insert(Gtk.SeparatorToolItem(), -1)\n\n send = Gtk.ToolButton(Gtk.STOCK_EXECUTE)\n send.connect(\"clicked\", self.__send_files)\n toolbar.insert(send, -1)\n\n self._reload_file_list()\n return v_box\n\n def __create_layout(self):\n h_box = Gtk.HBox(spacing=6)\n h_box.pack_start(self.__create_client_list_panel(), True, True, 0)\n h_box.pack_start(self.__create_file_list_panel(), True, True, 0)\n self.add(h_box)\n\n def __configure_drag_drop(self):\n self.drag_dest_set(0, [], 0)\n self.connect(\"drag-motion\", lambda widget, context, x, y, time:(\n Gdk.drag_status(context, Gdk.DragAction.COPY, time)\n , True\n ))\n self.connect(\"drag-drop\", lambda widget, context, x, y, time:(\n widget.drag_get_data(context, context.list_targets()[-1], time)\n , context.finish(True, False, time)\n ))\n self.connect(\"drag-data-received\", self.__on_drag_data_received)\n\n @staticmethod\n def get_text_size(bytes):\n for unit in [\"B\", \"KB\", \"MB\", \"GB\"]:\n if bytes < 1024.0:\n return \"{0:.2f}{1}\".format(bytes, unit)\n bytes /= 1024.0\n\n def __send_files(self, button):\n for ip in [client[0] for client in self.clients if client[1]]:\n self.p2p.send_files(ip, self.file_list)\n\n def _reload_file_list(self):\n self.total_files_label.set_text(\"Files: {}\".format(self.file_list.total_files))\n self.total_size_label.set_text(\"Total size: {}\".format(AppWindow.get_text_size(self.file_list.total_size)))\n\n self.files.clear()\n for nested_list in self.file_list:\n for file in nested_list[0]:\n self.files.append([AppWindow.get_text_size(file[1]), file[0], nested_list[2]])\n\n # Messages\n def __on_drag_data_received(self, widget, drag_context, x, y, data, info, time):\n for url in urllib.parse.unquote(data.get_data().decode(\"UTF-8\")).splitlines():\n self.file_list.append(P2PClient.list_files(urlparse(url).path))\n self._reload_file_list()\n\n def on_device_list_update(self, devices):\n self.clients.clear()\n for ip in devices:\n self.clients.append([ip, False])\n\n def on_accept_connection_prompt(self, ip, thread):\n def runner():\n if show_msg_box(self, \"Connection\", \"Incoming connection from {}. Accept?\".format(ip)):\n thread.accept_connection()\n else:\n thread.refuse_connection()\n return False\n\n GObject.idle_add(runner)\n\n def on_refuse_connection_prompt(self):\n GObject.idle_add(show_msg_box, self, \"Connection\", \"Receiver refused the connection!\", Gtk.ButtonsType.CANCEL)\n","repo_name":"Mati365/pyFileTransfer","sub_path":"filetransfer/client/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":6687,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"31003849143","text":"import os, shutil, datetime,time\n\ndef newCD(x):\n if os.path.isdir(x):\n try:\n print(os.getcwd())\n os.chdir(path)\n print(os.getcwd())\n except FileNotFoundError:\n print('Папка не найдена')\n else:\n print('Параметр передан неверно')\n\ndef newCP(x0,x):\n if os.path.isdir(x0):\n try:\n shutil.copytree(x0,x)\n except FileExistsError:\n print('Папка существует')\n else:\n try:\n shutil.copy(x0,x)\n except FileNotFoundError:\n print('Файл не найден')\n\ndef newLS(folders_only=False):\n res = os.listdir()\n if folders_only:\n res = [f for f in result if os.path.isdir(f)]\n print(res)\ndef Touch(x, text=None):\n with open(x, 'w', encoding='utf-8') as f:\n if text:\n f.write(text)\n\ndef newRM(x):\n if os.path.isdir(x):\n os.rmdir(x)\n else:\n try:\n os.remove(x)\n except FileNotFoundError:\n print('Файл не найден')\n \ndef Mkdir(x):\n try:\n os.mkdir(x)\n except FileExistsError:\n print('Папка существует')\n\ndef _WriteLog(text):\n boardTime = datetime.datetime.now()\n boardTimeUnix = round(boardTime.timestamp(),1)\n res = f'{boardTimeUnix} - {text}'\n with open('log.txt', 'a', encoding='utf-8') as f:\n f.write(res + '\\n')\n\n\nif __name__ == '__main__':\n Touch('touch_test.txt')\n Mkdir('clear-dir')\n newLS()\n\n newCP('touch_test.txt', 'touch_copy.txt')\n newCP('clear-dir', 'clear-copy')\n newLS()\n\n newRM('touch_test.txt')\n newRM('clear-dir')\n newRM('touch_copy.txt')\n newRM('clear-copy')\n newLS()\n","repo_name":"Panikowsky-M/py-samples","sub_path":"gb/les8/l8_mgr.py","file_name":"l8_mgr.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2764608476","text":"import math\n\n\ndef main(a, b, z):\n result = 0\n for j in range(1, b+1):\n for i in range(1, a+1):\n result += 46*(math.log(97*(j**3))) - \\\n ((math.cos(i-1-(i**3)))**6)-(z**7)\n return result\n","repo_name":"Kaz1deX/PythonPractices","sub_path":"kispython/ex3_var21.py","file_name":"ex3_var21.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21451107735","text":"class Solution(object):\n def peopleIndexes(self, favoriteCompanies):\n \"\"\"\n T:O(log*n) S:O(n)\n Runtime: 380 ms, faster than 85.58% of Python online submissions for People Whose List of Favorite Companies Is Not a Subset of Another List.\n Memory Usage: 29.9 MB, less than 100.00% of Python online submissions for People Whose List of Favorite Companies Is Not a Subset of Another List.\n :type favoriteCompanies: List[List[str]]\n :rtype: List[int]\n \"\"\"\n l = len(favoriteCompanies)\n self.parent = [i for i in range(l)]\n for i in range(l):\n for j in range(i+1, l):\n root_i = self.find(i)\n root_j = self.find(j)\n if root_i == root_j:\n continue\n elif self.contain(favoriteCompanies[i], favoriteCompanies[j]):\n self.parent[j] = root_i\n elif self.contain(favoriteCompanies[j], favoriteCompanies[i]):\n self.parent[i] = root_j\n return [i for i in range(l) if self.parent[i] == i] \n\n def find(self, i):\n if self.parent[i] != i:\n self.parent[i] = self.parent[self.parent[i]]\n return self.parent[i]\n\n def contain(self, a, b):\n \"\"\"\n check if b is containing in a\n \"\"\"\n if len(a) <= len(b):\n return False\n set_a = set(a)\n for char in b:\n if char not in set_a:\n return False\n return True\n\n\n# print(Solution().peopleIndexes([[\"leetcode\",\"google\",\"facebook\"],[\"google\",\"microsoft\"],[\"google\",\"facebook\"],[\"google\"],[\"amazon\"]]))\nprint(Solution().peopleIndexes([[\"leetcode\",\"google\",\"facebook\"],[\"leetcode\",\"amazon\"],[\"facebook\",\"google\"]]))","repo_name":"jerrt2003/leetcode-in-python","sub_path":"1452_People_Whose_List_of_Favorite_Companies_Is_Not_a_Subset_of_Another_List/unionFind.py","file_name":"unionFind.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9428620746","text":"from PyQt6.QtWidgets import QWidget, QFileDialog\nfrom UI.form import *\nfrom Models.production_base import ProdBaseModel\nfrom src.database import connect_QSQL_db\nfrom Widgets.edit_facts import InitData\n\nfrom src.core_experta2 import *\n\n\nclass App(QWidget):\n tablename = \"test\"\n\n def __init__(self):\n QWidget.__init__(self)\n self.w = QtWidgets.QMainWindow()\n self.w_root = Ui_MainWindow()\n self.w_root.setupUi(self.w)\n\n self.init_data_form = InitData(self)\n self.w_root.pushButton_2.setEnabled(False)\n self.w_root.pushButton_3.setEnabled(False)\n\n self.w_root.pushButton.clicked.connect(self.download_base)\n self.w_root.pushButton_3.clicked.connect(self.init_data)\n self.w_root.pushButton_2.clicked.connect(self.solve)\n\n self.w.show()\n\n def download_base(self):\n filename, _ = QFileDialog.getOpenFileName(None, 'Open Base', './', \"Database (*.db *.sqlite *.sqlite3)\")\n self.db = connect_QSQL_db(filename)\n if self.db:\n self.base_model = ProdBaseModel(self.db, self.tablename)\n self.w_root.tableView.setModel(self.base_model)\n self.resize_columns()\n # self.w_root.tableView.horizontalHeader().moveSection(5, 3)\n # self.w_root.tableView.horizontalHeader().moveSection(6, 4)\n self.db.close()\n self.df_rules = getRulesFromDb(filename, self.tablename)\n self.w_root.pushButton_2.setEnabled(True)\n self.w_root.pushButton_3.setEnabled(True)\n KE = type(\"KE\", (KnowledgeEngine,), dict())\n addRules(self.df_rules, KE)\n self.engine = KE()\n self.engine.reset()\n print(self.df_rules)\n\n def init_data(self):\n self.init_data_form.w.show()\n # self.update_work_memory()\n\n def solve(self):\n self.engine.run()\n self.update_work_memory()\n\n def update_work_memory(self):\n facts = []\n for i in range(1, len(self.engine.facts)):\n key = str(list(self.engine.facts[i].as_dict().keys())[0])\n value = str(list(self.engine.facts[i].as_dict().values())[0])\n if key != \"0\":\n k_value = f\"{key} = {value} \\t\\t\"\n facts.append(f\"{k_value}\")\n else:\n facts.append(value)\n self.w_root.textBrowser.clear()\n self.w_root.textBrowser.setText(\"\\n\".join(facts))\n\n def resize_columns(self):\n # for i in range(6):\n # self.w_root.tableView.resizeColumnToContents(i)\n # self.w_root.tableView.setColumnWidth(i, self.w_root.tableView.columnWidth(i) + 10)\n self.w_root.tableView.setColumnWidth(0, 100)\n self.w_root.tableView.setColumnWidth(1, 400)\n self.w_root.tableView.setColumnWidth(2, 350)\n self.w_root.tableView.setColumnWidth(3, 100)\n","repo_name":"yurchest/Diplom","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8108266525","text":"from django.shortcuts import render, redirect\nfrom .forms import UserLoginForm,UserSignUpForm\nfrom .models import Profile\nfrom django.contrib.auth import login, logout\n\n\n# Create your views here.\ndef signin(request):\n form = UserLoginForm()\n if request.method == 'POST':\n form = UserLoginForm(request, request.POST)\n if form.is_valid():\n login(request, form.get_user())\n return redirect('main:index')\n\n return render(request, 'signin.html', {'form': form})\n\ndef signup(request): \n form = UserSignUpForm()\n if request.method =='POST':\n form = UserSignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect('main:index')\n\n return render(request, 'signup.html', {'form': form})\n\ndef signout(request):\n logout(request)\n return redirect('main:index')\n\n\ndef new_profile(request):\n #로그인하지 않았다면 프로필 누르더라도 계속 홈으로 이동\n if request.user.is_anonymous:\n return redirect(\"main:index\")\n\n #로그인 했다면 해당 user 의 profile 보기\n profile, created = Profile.objects.get_or_create(user = request.user)\n return render(request, 'Profile.html', {\"profile\": profile})\n # get = 이미 존재한다. = created = FALSE\n # create = 존재하지 않는다. = created = TRUE\n\n\ndef create_profile(request):\n profile, created = Profile.objects.get_or_create(user = request.user)\n if request.method == \"POST\":\n profile.nickname = request.POST.get('nickname')\n profile.image = request.FILES.get('image')\n profile.save()\n return redirect('user:new_profile')\n #나쁜 사용자\n return render(request, \"Profile.html\", {'profile':profile})\n","repo_name":"minjeoong/10Day_Django","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16265414568","text":"import random\nimport os\n\n# Outline\n# -----------\n# draw the grid\n# pick random location for player\n# pick random location for exit door\n# pick random location for monster\n# draw player in the grid\n# take input for movement\n# move player, unless invalid move (past edges of grid)\n# check for win/loss\n# clear screen and redraw grid\n\n# List of tuples\nCELLS = [(0,0), (1,0), (2,0), (3,0), (4,0),\n (0,1), (1,1), (2,1), (3,1), (4,1),\n (0,2), (1,2), (2,2), (3,2), (4,2),\n (0,3), (1,3), (2,3), (3,3), (4,3),\n (0,4), (1,4), (2,4), (3,4), (4,4)]\n\ndef clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef get_locations():\n # monster = None\n # door = None\n # player = None\n # return monster, door, player\n return random.sample(CELLS, 3)\n\n\ndef move_player(player, move):\n x, y = player\n if move == \"LEFT\":\n x -= 1\n if move == \"RIGHT\":\n x += 1\n if move == \"UP\":\n y -= 1\n if move == \"DOWN\":\n y += 1\n # get the player's location\n # if move == LEFT, x-1\n # if move == RIGHT, x+1\n # if move == UP, y-1\n # if move == DOWN, y+1\n # return player\n return x, y\n\ndef get_moves(player):\n moves = [\"LEFT\", \"RIGHT\", \"UP\", \"DOWN\"]\n x, y = player\n # if player's y == 0, they can't move UP\n # if player's y == 4, they can't move DOWN\n # if player's x == 0, they can't move LEFT\n # if player's x == 4, they can't move RIGHT\n if x == 0:\n moves.remove(\"LEFT\")\n if x == 4:\n moves.remove(\"RIGHT\")\n if y == 0:\n moves.remove(\"UP\")\n if y == 4:\n moves.remove(\"DOWN\")\n return moves\n\ndef draw_map(player):\n print(\" _\" * 5)\n tile = \"|{}\"\n for cell in CELLS:\n x, y = cell\n if x < 4:\n line_end = \"\"\n if cell == player:\n output = tile.format(\"X\")\n else:\n output = tile.format(\"_\")\n else:\n line_end = \"\\n\"\n if cell == player:\n output = tile.format(\"X|\")\n else:\n output = tile.format(\"_|\")\n print(output, end=line_end)\n\ndef game_loop():\n monster, door, player = get_locations()\n playing = True\n\n while playing:\n clear_screen()\n draw_map(player)\n valid_moves = get_moves(player)\n print(\"You're currently in room {}\".format(player)) # fill in with player position\n print(\"You can move {}\".format(\", \".join(valid_moves))) # fill with available moves\n print(\"Enter Q to Quit\")\n\n move = input(\"> \").upper()\n if move == 'Q':\n print(\"\\n ** See you next time. **\\n\")\n break\n if move in valid_moves:\n player = move_player(player, move)\n\n if player == monster:\n print(\"\\n ** Oh no! The monster got you. Better luck next time! **\\n\")\n playing = False\n\n if player == door:\n print(\"\\n ** You've escaped! Congratulations! **\\n\")\n playing = False\n\n else:\n input(\"\\n ** Walls are hard! Don't run into them! **\\n\")\n else:\n if input(\"Play again? [Y/n] \").lower() != 'n':\n game_loop()\n\n # Good move? Change the player position.\n # Bad move? Don't change anything!\n # On the door? They win!\n # On the monster? They lose!\n # Otherwise, loop back around.\n\nclear_screen()\nprint(\"Welcome to the dungeon!\")\ninput(\"Press RETURN to START\")\nclear_screen()\ngame_loop()\n","repo_name":"duliodenis/python_master_degree","sub_path":"unit_02/03_collections/6-Dungeon-Game/dungeon-game.py","file_name":"dungeon-game.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"}
+{"seq_id":"5523862526","text":"from urllib.parse import urlencode\n\nfrom django import http\n\nfrom .models import InstallerUpdate, UpdateQueue\n\nUSER_UPDATE = 0b01\nADMIN_UPDATE = 0b10\n\n\ndef get_latest_installer(request):\n \"\"\"Get latest full HiSPARC installer\"\"\"\n\n installer = InstallerUpdate.objects.filter(queue__slug='hisparc').first()\n return http.HttpResponseRedirect(installer.installer.url)\n\n\ndef update_check_querystring(request, queue):\n \"\"\"Check for software updates\"\"\"\n\n try:\n admin_version = request.GET['admin_version']\n user_version = request.GET['user_version']\n except KeyError:\n return http.HttpResponseBadRequest('Incomplete request.')\n\n return update_check(request, queue, admin_version, user_version)\n\n\ndef update_check(request, queue, admin_version, user_version):\n try:\n queue = UpdateQueue.objects.get(slug=queue)\n except UpdateQueue.DoesNotExist:\n return http.HttpResponseBadRequest('Queue does not exist.')\n\n answer = {'mustUpdate': 0b00}\n\n admin_updates = queue.admin_updates.filter(version__gt=admin_version)\n if admin_updates:\n answer['mustUpdate'] |= ADMIN_UPDATE\n latest = admin_updates.first()\n answer['newVersionAdmin'] = latest.version\n answer['urlAdmin'] = latest.update.url\n\n user_updates = queue.user_updates.filter(version__gt=user_version)\n if user_updates:\n answer['mustUpdate'] |= USER_UPDATE\n latest = user_updates.first()\n answer['newVersionUser'] = latest.version\n answer['urlUser'] = latest.update.url\n\n return http.HttpResponse(urlencode(answer), content_type='text/plain')\n","repo_name":"HiSPARC/publicdb","sub_path":"publicdb/updates/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
+{"seq_id":"12006184170","text":"from yugabyte_db_thirdparty.build_definition_helpers import * # noqa\n\n\nclass LibCDSDependency(Dependency):\n def __init__(self) -> None:\n super(LibCDSDependency, self).__init__(\n name='libcds',\n version='2.3.3',\n url_pattern='https://github.com/khizmax/libcds/archive/v{0}.tar.gz',\n build_group=BuildGroup.POTENTIALLY_INSTRUMENTED)\n self.copy_sources = False\n\n def build(self, builder: BuilderInterface) -> None:\n builder.build_with_cmake(\n self,\n extra_args=[\n '-DCMAKE_BUILD_TYPE=Release',\n '-DBUILD_SHARED_LIBS=ON'\n ]\n )\n","repo_name":"yugabyte/yugabyte-db-thirdparty","sub_path":"python/build_definitions/libcds.py","file_name":"libcds.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"}
+{"seq_id":"30645455168","text":"'''\r\n Train routines, visualisation, and class\r\n'''\r\nimport os\r\nfrom datetime import datetime\r\nimport torch\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom multiprocessing import cpu_count\r\n\r\nfrom DiscreteCNN import DiscreteCNN_core\r\nfrom DiscreteCNN_data_loader import DiscreteCNN_Dataset\r\n\r\nif __name__ == \"__main__\":\r\n # Directory check\r\n if os.path.basename(os.getcwd()) != \"2022FilamentClassifierNET\":\r\n print(\"ERROR:\\nEvery script must be executed from inside the 2022FilamentClassifierNET directory\")\r\n quit()\r\n # ste model name\r\n now = datetime.now()\r\n model_name = now.strftime(\"model_%Y%m%d_%H%M%S.pt\")\r\n # Set hyperparams\r\n n_classes = 5\r\n n_training_steps = 750\r\n batch_size = 32\r\n num_workers = np.min([batch_size, int(cpu_count()) if not torch.cuda.is_available() else torch.cuda.device_count()])\r\n lr0 = 0.001\r\n # Get training data\r\n csv_file = os.path.join(os.getcwd(), \"NeuralNetwork\", \"DiscreteCNN\", \"train_valid_data\", \"train.csv\")\r\n train_dataset = DiscreteCNN_Dataset(csv_file, n_classes=n_classes)\r\n train_loader = torch.utils.data.DataLoader(train_dataset,\r\n batch_size=batch_size,\r\n shuffle=True,\r\n num_workers=num_workers)\r\n # Get validation data\r\n csv_file = os.path.join(os.getcwd(), \"NeuralNetwork\", \"DiscreteCNN\", \"train_valid_data\", \"validation.csv\")\r\n valid_dataset = DiscreteCNN_Dataset(csv_file, n_classes=n_classes)\r\n valid_loader = torch.utils.data.DataLoader(valid_dataset,\r\n batch_size=batch_size,\r\n shuffle=False,\r\n num_workers=num_workers)\r\n\r\n # Get the used device\r\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n # Create network\r\n model = DiscreteCNN_core(n_voxels_per_side=41, out_point_classes=n_classes)\r\n model.to(device)\r\n # Loss, optmiser, lerning-rate trajectory\r\n criterion = torch.nn.CrossEntropyLoss()\r\n optimizer = torch.optim.SGD(model.parameters(), lr=lr0, momentum=0.9)\r\n\r\n # Training + Validation cycle\r\n plt.ion()\r\n fig = plt.figure()\r\n spec = fig.add_gridspec(2, 3)\r\n ax_tv = fig.add_subplot(spec[:,:2]) # losses\r\n ax_im = fig.add_subplot(spec[0,2]) # confusion matrix\r\n ax_rw = fig.add_subplot(spec[1,2]) # accuracy chart\r\n t_loss_list = []\r\n v_loss_list, v_steps_list, v_right_wrong_list = [], [], []\r\n for training_step in range(n_training_steps):\r\n print(f\"Train step {training_step:3d}\", end=\"\\r\")\r\n # Training ------------------------------\r\n data = next(iter(train_loader))\r\n # unpack data\r\n inputs = data[\"input_t\"].to(device)\r\n labels = data[\"label_t\"].to(device)\r\n # zero the parameter gradients\r\n optimizer.zero_grad()\r\n # forward + backward + optimize\r\n outputs = model(inputs)\r\n t_loss = criterion(outputs, labels)\r\n t_loss.backward()\r\n optimizer.step()\r\n # Learning rate\r\n if training_step < int(n_training_steps/5):\r\n lr = ((lr0*2-lr0)/int(n_training_steps/5))*training_step + lr0\r\n else:\r\n lr = -(2*lr0/(n_training_steps+1-int(n_training_steps/5)))*training_step + 3*lr0\r\n for g in optimizer.param_groups:\r\n g['lr'] = lr\r\n # save res\r\n t_loss_list.append(t_loss.item())\r\n # Validation ----------------------------\r\n if (training_step == 0) or (training_step%5 == 0):\r\n v_loss = 0.0\r\n confusion_matrix_image = np.zeros((n_classes, n_classes))\r\n right, wrong = 0, 0\r\n with torch.no_grad():\r\n for i, v_data in enumerate(valid_loader, 0):\r\n inputs = v_data[\"input_t\"].to(device)\r\n labels = 
v_data[\"label_t\"].to(device)\r\n outputs = model(inputs)\r\n # save res\r\n v_loss += criterion(outputs, labels).item()/len(valid_loader)\r\n for rowL, rowO in zip(labels, outputs):\r\n idx_r = int( rowL.cpu().detach().numpy() )\r\n idx_c = int( np.argmax( torch.nn.Softmax(dim=-1)(rowO).cpu().detach().numpy()) )\r\n confusion_matrix_image[idx_r, idx_c] += 1\r\n if idx_r == idx_c:\r\n right += 1\r\n else:\r\n wrong += 1\r\n v_loss_list.append(v_loss)\r\n v_steps_list.append(training_step)\r\n v_right_wrong_list.append([right, wrong])\r\n # Display images -------------------------\r\n #\r\n ax_tv.clear()\r\n ax_tv.plot(t_loss_list, \"r-\", label=\"T\", linewidth=0.7)\r\n ax_tv.plot(v_steps_list,v_loss_list, \"b-\", label=\"V\", linewidth=1.4)\r\n ax_tv.set_ylabel(\"Cross Entropy loss\")\r\n ax_tv.set_xlabel(\"Training steps\")\r\n ax_tv.set_title(\"Learning trajectory\")\r\n ax_tv.legend()\r\n ax_tv.grid()\r\n #\r\n for r in range(confusion_matrix_image.shape[0]):\r\n confusion_matrix_image[r,:] = confusion_matrix_image[r,:] / np.sum(confusion_matrix_image[r,:]) if np.sum(confusion_matrix_image[r,:]) != 0 else 0*confusion_matrix_image[r,:]\r\n ax_im.clear()\r\n ax_im.imshow(confusion_matrix_image, vmin=0, vmax=1, cmap=\"bone\")\r\n ax_im.set_ylabel(\"True class (%)\")\r\n ax_im.set_xlabel(\"Predicted class (%)\")\r\n ax_im.set_title(\"Confusion matrix\\n(with row-wise normalisation)\")\r\n #\r\n rwl = np.array(v_right_wrong_list)\r\n n_valid = rwl[-1,0] + rwl[-1,1]\r\n ax_rw.plot(v_steps_list, rwl[:,0], label=\"Right classification\")\r\n ax_rw.plot([0, v_steps_list[-1]], [n_valid, n_valid], \"--\", color=\"black\", label=\"Tot\")\r\n ax_rw.legend()\r\n ax_rw.set_title(f\"Correct inferences: {rwl[-1,0]:4d} ({100 * rwl[-1,0]/n_valid:3.2f}%)\")\r\n plt.pause(0.5)\r\n # Save model params if validation is the best\r\n if training_step != 0:\r\n if np.min(v_loss_list) == v_loss_list[-1]:\r\n torch.save(model.state_dict(), os.path.join(os.getcwd(), \"NeuralNetwork\", \"DiscreteCNN\", \"trained_networks\", model_name))\r\n print('Finished Training')\r\n plt.ioff()\r\n plt.show()\r\n \r\n ","repo_name":"Matteoleccardi/2022FilamentClassifierNET","sub_path":"NeuralNetwork/DiscreteCNN/DiscreteCNN_trainer.py","file_name":"DiscreteCNN_trainer.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8664025905","text":"from logging import getLogger\nfrom pyoffline_models import Resource\n\n\nclass Downloader:\n def __init__(self, http_client):\n self.logger = getLogger()\n self.http_client = http_client\n\n\n def download(self, resource: Resource):\n self.logger.info(f'Requesting {resource.url}.')\n response = self.http_client.get(resource.url)\n self.logger.info(f'Got HTTP {response.status_code}, {response.headers[\"Content-Type\"]}.')\n\n resource.url = response.url\n resource.mimeType = response.headers[\"Content-Type\"]\n resource.encoding = response.encoding\n resource.body = response.text\n\n return resource\n","repo_name":"andreimoustache/py-offline","sub_path":"pyoffline_downloader.py","file_name":"pyoffline_downloader.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"18086001122","text":"from msrest.serialization import Model\n\n\nclass CheckNameAvailabilityResponse(Model):\n \"\"\"CheckNameAvailabilityResponse.\n\n :param name_available: Checks if the namespace name is available\n :type name_available: bool\n :param reason: States the reason due to which the namespace name is not\n available\n :type reason: str\n :param message: The messsage returned when checking for namespace name\n availability\n :type message: str\n \"\"\"\n\n _attribute_map = {\n 'name_available': {'key': 'NameAvailable', 'type': 'bool'},\n 'reason': {'key': 'Reason', 'type': 'str'},\n 'message': {'key': 'Message', 'type': 'str'},\n }\n\n def __init__(self, name_available=None, reason=None, message=None):\n self.name_available = name_available\n self.reason = reason\n self.message = message\n","repo_name":"AntObr/credit-to-customer","sub_path":"env/lib/python2.7/site-packages/azure/mgmt/notificationhubs/models/check_name_availability_response.py","file_name":"check_name_availability_response.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"919755323","text":"#!/usr/bin/python3\n\n#Interviewbit-stack\n#Question link: https://www.interviewbit.com/problems/redundant-braces/\n\ndef findDuplicate(string):\n stack = []\n operators = ['+', '-', '*', '/', '(']\n #Edge case- '(a)' should be True\n if len(string)==3 and '(' in string and ')' in string:\n return True\n for i in string:\n if i == ')':\n top = stack[-1]\n stack.pop()\n if top == '(':\n return True\n else:\n while top != '(' and stack:\n top = stack[-1]\n stack.pop()\n elif i in operators:\n stack.append(i)\n return False\n\nif __name__ == '__main__':\n l = \"(a)\"\n print(findDuplicate(l))\n","repo_name":"abhishekgupta5/data_structures","sub_path":"redundant_braces.py","file_name":"redundant_braces.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"74466927527","text":"from typing import List\nfrom KnarlusLogConsole import log_to_console\n\n\ndef get_token(file_path: str = \"TOKEN.txt\") -> str:\n \"\"\"\n get_token returns the first line from the file at the given path.\n\n :param file_path: path to token file\n :return: token as string\n \"\"\"\n try:\n with open(file=file_path, mode=\"r\") as token_file:\n token = token_file.readline()\n return token\n except FileNotFoundError as error:\n log_to_console(log_msg=f\"The file '{file_path}' was not found. Please set up the token file!\",\n log_function_name=\"get_token\", log_type=\"err\")\n log_to_console(log_msg=str(error),\n log_function_name=\"get_token\", log_type=\"err\")\n exit(1)\n\n\ndef get_guild_ids(file_path: str = \"GUILD_IDS.txt\") -> List[int]:\n \"\"\"\n get_guild_ids returns a list of all guild ids found in the given guild id file (standard if GUILD_IDS.txt)\n\n :param file_path: path to guild id file\n :return: list of int representing all known guild ids\n \"\"\"\n guild_ids: List[int] = []\n try:\n with open(file=file_path, mode=\"r\") as guild_id_file:\n for line in guild_id_file:\n if line:\n try:\n guild_ids.append(int(line))\n except ValueError as error:\n log_to_console(log_msg=f\"The line '{line[:-1]}' could not be converted to int. Please check \\\nguild id file {file_path}!\", log_function_name=\"get_guild_ids\", log_type=\"war\")\n log_to_console(log_msg=str(error), log_function_name=\"get_guild_ids\", log_type=\"war\")\n except FileNotFoundError as error:\n log_to_console(log_msg=f\"The file '{file_path}' was not found. Please set up the guild id file!\",\n log_function_name=\"get_guild_ids\", log_type=\"err\")\n log_to_console(log_msg=str(error),\n log_function_name=\"get_guild_ids\", log_type=\"err\")\n exit(1)\n if not guild_ids:\n log_to_console(log_msg=f\"The file '{file_path}' does not contain any guild ids. No guild will have slash \\\nsupport\", log_function_name=\"get_guild_ids\", log_type=\"war\")\n return guild_ids\n\n\nif __name__ == \"__main__\":\n print(get_token())\n print(get_guild_ids())\n","repo_name":"Knarlus/discord-get-ip","sub_path":"KnarlusReadConfig.py","file_name":"KnarlusReadConfig.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7799572885","text":"M,N,H = map(int, input().split());\nboards = [];\nfor _ in range(H):\n board = [];\n for _ in range(N):\n board.append(list(map(int,input().split())));\n boards.append(board);\n \ndef isValid(lay, row, col):\n global boards;\n try:\n if row >= 0 and col >= 0 and lay >= 0 and boards[lay][row][col] == 0:\n return True;\n except:\n return False;\n return False;\n\nstack = [];\nremain = 0;\nfor lay in range(H):\n for row in range(N):\n for col in range(M):\n if boards[lay][row][col] == 1:\n stack.append((lay,row,col));\n elif boards[lay][row][col] == 0:\n remain += 1;\nanswer = 0;\nwhile remain != 0:\n if len(stack) == 0:\n answer = -1;\n break;\n next = [];\n for lay,row,col in stack:\n arr = [(lay+1,row,col),(lay-1,row,col),\n (lay,row+1,col),(lay,row-1,col),\n (lay,row,col+1),(lay,row,col-1),];\n for l,r,c in arr:\n if isValid(l,r,c):\n next.append((l,r,c));\n boards[l][r][c]= 1;\n remain -= 1;\n stack = next;\n answer += 1;\n \nprint(answer);","repo_name":"LeeHanSeong7/AlgorithmLab","sub_path":"Baekjoon/problem/python/7569.py","file_name":"7569.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70093228647","text":"\"\"\"!\nFactor analyzer test cases\n\"\"\"\nimport math\nimport unittest\nfrom random import choice\n\nfrom pygmodels.factorf.factoranalyzer import FactorAnalyzer\nfrom pygmodels.gtype.edge import Edge, EdgeType\nfrom pygmodels.pgmtype.factor import BaseFactor, Factor\nfrom pygmodels.pgmtype.randomvariable import NumCatRVariable\n\n\nclass TestFactorAnalyzer(unittest.TestCase):\n \"\"\"!\"\"\"\n\n def setUp(self):\n \"\"\"\"\"\"\n # Koller, Friedman 2009, p. 104\n self.Bf = NumCatRVariable(\n node_id=\"B\",\n input_data={\"outcome-values\": [10, 50]},\n marginal_distribution=lambda x: 0.5,\n )\n self.Cf = NumCatRVariable(\n node_id=\"C\",\n input_data={\"outcome-values\": [10, 50]},\n marginal_distribution=lambda x: 0.5,\n )\n\n def phibc(scope_product):\n \"\"\"\"\"\"\n sfs = set(scope_product)\n if sfs == set([(\"B\", 10), (\"C\", 10)]):\n return 0.5\n elif sfs == set([(\"B\", 10), (\"C\", 50)]):\n return 0.7\n elif sfs == set([(\"B\", 50), (\"C\", 10)]):\n return 0.1\n elif sfs == set([(\"B\", 50), (\"C\", 50)]):\n return 0.2\n else:\n raise ValueError(\"unknown arg\")\n\n self.bc = Factor(\n gid=\"bc\", scope_vars=set([self.Bf, self.Cf]), factor_fn=phibc\n )\n self.bc_b = BaseFactor(\n gid=\"bc\", scope_vars=set([self.Bf, self.Cf]), factor_fn=phibc\n )\n\n def test_cls_max_value(self):\n \"\"\"\"\"\"\n mval = FactorAnalyzer.cls_max_value(self.bc)\n self.assertEqual(mval, set([(\"B\", 10), (\"C\", 50)]))\n\n def test_cls_max_probability(self):\n \"\"\"\"\"\"\n mval = FactorAnalyzer.cls_max_probability(self.bc)\n self.assertEqual(mval, 0.7)\n\n def test_cls_min_value(self):\n \"\"\"\"\"\"\n mval = FactorAnalyzer.cls_min_value(self.bc)\n self.assertEqual(mval, set([(\"B\", 50), (\"C\", 10)]))\n\n def test_cls_min_probability(self):\n \"\"\"\"\"\"\n mval = FactorAnalyzer.cls_min_probability(self.bc)\n self.assertEqual(mval, 0.1)\n\n def test_max_value(self):\n \"\"\"\"\"\"\n mval = FactorAnalyzer(self.bc_b).max_value()\n self.assertEqual(mval, set([(\"B\", 10), (\"C\", 50)]))\n\n def test_max_probability(self):\n \"\"\"\"\"\"\n mval = FactorAnalyzer(self.bc_b).max_probability()\n self.assertEqual(mval, 0.7)\n\n def test_min_value(self):\n \"\"\"\"\"\"\n mval = FactorAnalyzer(self.bc_b).min_value()\n self.assertEqual(mval, set([(\"B\", 50), (\"C\", 10)]))\n\n def test_min_probability(self):\n \"\"\"\"\"\"\n mval = FactorAnalyzer(self.bc_b).min_probability()\n self.assertEqual(mval, 0.1)\n\n @unittest.skip(\"FactorAnalyzer.normalize not yet implemented\")\n def test_normalize(self):\n \"\"\"\"\"\"\n pass\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"D-K-E/graphical-models","sub_path":"test/test_factoranlayzer.py","file_name":"test_factoranlayzer.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"74138932648","text":"# =====================================================================================================\r\n# PROBLEM C4\r\n#\r\n# Build and train a classifier for the sarcasm dataset.\r\n# The classifier should have a final layer with 1 neuron activated by sigmoid.\r\n#\r\n# Do not use lambda layers in your model.\r\n#\r\n# Dataset used in this problem is built by Rishabh Misra (https://rishabhmisra.github.io/publications).\r\n#\r\n# Desired accuracy and validation_accuracy > 75%\r\n# =======================================================================================================\r\n\r\nimport json\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport urllib\r\nfrom tensorflow.keras.preprocessing.text import Tokenizer\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\n\r\n\r\ndef solution_C4():\r\n data_url = 'https://github.com/dicodingacademy/assets/raw/main/Simulation/machine_learning/sarcasm.json'\r\n urllib.request.urlretrieve(data_url, 'sarcasm.json')\r\n\r\n # DO NOT CHANGE THIS CODE\r\n # Make sure you used all of these parameters or test may fail\r\n vocab_size = 1000\r\n embedding_dim = 16\r\n max_length = 120\r\n trunc_type = 'post'\r\n padding_type = 'post'\r\n oov_tok = \"\"\r\n training_size = 20000\r\n\r\n sentences = []\r\n labels = []\r\n # YOUR CODE HERE\r\n with open(\"sarcasm.json\", 'r') as f:\r\n datastore = json.load(f)\r\n for item in datastore:\r\n sentences.append(item['headline'])\r\n labels.append(item['is_sarcastic'])\r\n\r\n train_sentences = sentences[0:training_size]\r\n train_labels = labels[0:training_size]\r\n val_sentences = sentences[training_size:]\r\n val_labels = labels[training_size:]\r\n\r\n # Fit your tokenizer with training data\r\n tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)\r\n tokenizer.fit_on_texts(train_sentences)\r\n word_index = tokenizer.word_index\r\n\r\n train_seq = tokenizer.texts_to_sequences(train_sentences)\r\n train_pad = pad_sequences(train_seq, maxlen=max_length, padding=padding_type, truncating=trunc_type)\r\n\r\n val_seq = tokenizer.texts_to_sequences(val_sentences)\r\n val_pad = pad_sequences(val_seq, maxlen=max_length, padding=padding_type, truncating=trunc_type)\r\n\r\n train_labels = np.array(train_labels)\r\n val_labels = np.array(val_labels)\r\n\r\n model = tf.keras.Sequential([\r\n # YOUR CODE HERE. DO not change the last layer or test may fail\r\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\r\n tf.keras.layers.GlobalAveragePooling1D(),\r\n tf.keras.layers.Dense(24, activation='relu'),\r\n tf.keras.layers.Dense(1, activation='sigmoid')\r\n ])\r\n model.compile(\r\n loss='binary_crossentropy',\r\n optimizer='adam',\r\n metrics=['accuracy']\r\n )\r\n model.fit(train_pad, train_labels, epochs=30, validation_data=(val_pad, val_labels))\r\n return model\r\n\r\n\r\n# The code below is to save your model as a .h5 file.\r\n# It will be saved automatically in your Submission folder.\r\nif __name__ == '__main__':\r\n # DO NOT CHANGE THIS CODE\r\n model = solution_C4()\r\n model.save(\"model_C4.h5\")\r\n","repo_name":"leonardogianto/tensorflow-dev-1","sub_path":"SubmissionC/Problem_C4.py","file_name":"Problem_C4.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"74514799847","text":"import glob\nimport itertools\nimport os\nfrom tabnanny import check\n\nimport numpy as np\nimport torch\nimport torchvision\nimport wandb\nfrom tqdm import tqdm\nimport yaml\nimport argparse\n\n\nfrom nerf import models, load_blender_data, load_llff_data, get_ray_bundle, meshgrid_xy, CfgNode\n\ndef str_to_bool(value):\n if isinstance(value, bool):\n return value\n if value.lower() in {'false', 'f', '0', 'no', 'n'}:\n return False\n elif value.lower() in {'true', 't', '1', 'yes', 'y'}:\n return True\n raise ValueError(f'{value} is not a valid boolean value')\n\ndef load_config():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', required=True, \n help='config file path')\n # parser.add_argument(\"--expname\", type=str, \n # help='experiment name', required=False)\n # parser.add_argument(\"--basedir\", type=str, default='cache/nerf_synthetic/lego', \n # help='where to store ckpts and logs')\n # parser.add_argument(\"--datadir\", type=str, default='./data/llff/fern', \n # help='input data directory')\n\n # training options\n parser.add_argument(\"--depth\", type=int, \n help='layers in network')\n parser.add_argument(\"--width\", type=int, \n help='channels per layer')\n parser.add_argument(\"--num_random_rays\", type=int, \n help='batch size (number of random rays per gradient step)')\n parser.add_argument(\"--lr\", type=float, \n help='learning rate')\n parser.add_argument(\"--lr_ensemble\", type=float, \n help='learning rate of ensemble fully corrective steps')\n parser.add_argument(\"--lr_decay_weak\", type=int, \n help='exponential learning rate decay for weak lr')\n parser.add_argument(\"--lr_decay_corrective\", type=int, \n help='exponential learning rate decay for corrective lr')\n parser.add_argument(\"--lr_decay_factor_weak\", type=float, \n help='strength of the decay for weak lr')\n parser.add_argument(\"--lr_decay_factor_corrective\", type=float, \n help='strength of the decay for corrective lr')\n parser.add_argument(\"--chunksize\", type=int, \n help='number of rays processed in parallel, decrease if running out of memory')\n # parser.add_argument(\"--netchunk\", type=int, default=1024*64, \n # help='number of pts sent through network in parallel, decrease if running out of memory')\n # parser.add_argument(\"--no_batching\", action='store_true', \n # help='only take random rays from 1 image at a time')\n # parser.add_argument(\"--no_reload\", action='store_true', \n # help='do not reload weights from saved ckpt')\n # parser.add_argument(\"--ft_path\", type=str, default=None, \n # help='specific weights npy file to reload for coarse network')\n\n # rendering options\n parser.add_argument(\"--samples_coarse\", type=int, \n help='number of coarse samples per ray')\n parser.add_argument(\"--samples_fine\", type=int,\n help='number of additional fine samples per ray')\n parser.add_argument(\"--perturb\", type=str_to_bool,\n help='set to False for no jitter, True for jitter')\n parser.add_argument(\"--use_viewdirs\", type=str_to_bool, \n help='use full 5D input instead of 3D')\n # parser.add_argument(\"--i_embed\", type=int, \n # help='set 0 for default positional encoding, -1 for none')\n parser.add_argument(\"--num_encoding_xyz\", type=int, \n help='log2 of max freq for positional encoding (3D location)')\n parser.add_argument(\"--num_encoding_dir\", type=int, \n help='log2 of max freq for positional encoding (2D direction)')\n parser.add_argument(\"--raw_noise_std\", type=float, \n help='std dev of noise added to regularize 
sigma_a output, 1e0 recommended')\n\n # parser.add_argument(\"--render_only\", action='store_true', \n # help='do not optimize, reload weights and render out render_poses path')\n # parser.add_argument(\"--render_test\", action='store_true', \n # help='render the test set instead of render_poses path')\n # parser.add_argument(\"--render_factor\", type=int, \n # help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')\n\n # training options\n # parser.add_argument(\"--precrop_iters\", type=int, default=0,\n # help='number of steps to train on central crops')\n # parser.add_argument(\"--precrop_frac\", type=float,\n # default=.5, help='fraction of img taken for central crops') \n\n # dataset options\n parser.add_argument(\"--dataset_type\", type=str, \n help='options: llff / blender / deepvoxels')\n parser.add_argument(\"--testskip\", type=int, \n help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')\n\n ## deepvoxels flags\n # parser.add_argument(\"--shape\", type=str, default='greek', \n # help='options : armchair / cube / greek / vase')\n\n ## blender flags\n parser.add_argument(\"--white_bkgd\", type=str_to_bool, \n help='set to render synthetic data on a white bkgd (always use for dvoxels)')\n parser.add_argument(\"--half_res\", type=str_to_bool, \n help='load blender synthetic data at 400x400 instead of 800x800')\n\n ## llff flags\n # parser.add_argument(\"--factor\", type=int, default=8, \n # help='downsample factor for LLFF images')\n # parser.add_argument(\"--no_ndc\", action='store_true', \n # help='do not use normalized device coordinates (set for non-forward facing scenes)')\n # parser.add_argument(\"--lindisp\", action='store_true', \n # help='sampling linearly in disparity rather than depth')\n # parser.add_argument(\"--spherify\", action='store_true', \n # help='set for spherical 360 scenes')\n # parser.add_argument(\"--llffhold\", type=int, default=8, \n # help='will take every 1/N images as LLFF test set, paper uses 8')\n\n # logging/saving options\n # parser.add_argument(\"--i_print\", type=int, default=100, \n # help='frequency of console printout and metric loggin')\n # parser.add_argument(\"--i_img\", type=int, default=500, \n # help='frequency of tensorboard image logging')\n # parser.add_argument(\"--i_weights\", type=int, default=10000, \n # help='frequency of weight ckpt saving')\n # parser.add_argument(\"--i_testset\", type=int, default=50000, \n # help='frequency of testset saving')\n # parser.add_argument(\"--i_video\", type=int, default=50000, \n # help='frequency of render_poses video saving')\n\n ### Mine ###\n\n parser.add_argument(\"--max_mins\", type=int, \n help='Maximum duration of the experiment, in minutes')\n parser.add_argument(\"--load-checkpoint\", type=str, default=\"\",\n help=\"Path to load saved checkpoint from. Creates separate run\",)\n parser.add_argument(\"--resume\", type=str_to_bool,\n help=\"Resume from last checkpoint. 
(On wandb too)\")\n parser.add_argument(\"--run-name\", type=str, required=True,\n help=\"Name of the run (for wandb), leave empty for random name.\")\n\n parser.add_argument(\"--weak_iters\", type=int, \n help='number of iterations for the training of a weak learner')\n parser.add_argument(\"--corrective_iters\", type=int, \n help='number of corrective steps of the ensemble')\n parser.add_argument(\"--n_stages\", type=int, \n help='final number of weak learners')\n parser.add_argument(\"--boost_rate\", type=float, \n help='starting boosting rate of grownet')\n parser.add_argument(\"--learn_boost_rate\", type=str_to_bool,\n help=\"Update boost rate with training. Default=False\")\n parser.add_argument(\"--propagate_context\", type=str_to_bool,\n help=\"Propagation of the penultimate layer to the input of the next weak learner. Default=True\")\n parser.add_argument(\"--render_activation_fn\", type=str, \n help='torch activation function to use (sigmoid, tanh, ...)')\n\n parser.add_argument(\"--lr_reset_weak\", type=str_to_bool,\n help=\"Wether to restart the scheduler for each new weak learner\")\n parser.add_argument(\"--lr_reset_corrective\", type=str_to_bool,\n help=\"Wether to restart the scheduler for each corrective phase\")\n parser.add_argument(\"--lr_decay_corrective_peaked\", type=float, \n help='multiplier used to create peaks in the scheduler (works only if lr_reset_corrective: False)')\n parser.add_argument(\"--no_fine\", type=str_to_bool,\n help=\"Wether to use the fine model\")\n parser.add_argument(\"--hierarchical_factor\", type=float, \n help='weak hidden size = hidden_size*factor**stage')\n\n args = parser.parse_args()\n\n # Read config file.\n cfg = None\n with open(args.config, \"r\") as f:\n cfg_dict = yaml.load(f, Loader=yaml.FullLoader)\n cfg = CfgNode(cfg_dict)\n\n # Update cfg with user arguments\n if args.chunksize is not None:\n cfg.nerf.train.chunksize= args.chunksize\n cfg.nerf.validation.chunksize= args.chunksize\n if args.dataset_type is not None:\n cfg.dataset.type= args.dataset_type\n if args.depth is not None:\n cfg.models.coarse.num_layers = args.depth\n cfg.models.fine.num_layers = args.depth\n if args.half_res is not None:\n cfg.dataset.half_res = args.half_res\n if args.lr is not None:\n cfg.optimizer.lr = args.lr\n if args.lr_decay_weak is not None:\n cfg.scheduler.lr_decay_weak = args.lr_decay_weak\n if args.lr_decay_corrective is not None:\n cfg.scheduler.lr_decay_corrective = args.lr_decay_corrective\n if args.lr_decay_factor_weak is not None:\n cfg.scheduler.lr_decay_factor_weak = args.lr_decay_factor_weak\n if args.lr_decay_factor_corrective is not None:\n cfg.scheduler.lr_decay_factor_corrective = args.lr_decay_factor_corrective\n if args.lr_ensemble is not None:\n cfg.optimizer.lr_ensemble = args.lr_ensemble\n if args.num_encoding_dir is not None:\n cfg.models.coarse.num_encoding_fn_dir = args.num_encoding_dir\n cfg.models.fine.num_encoding_fn_dir = args.num_encoding_dir\n if args.num_encoding_xyz is not None:\n cfg.models.coarse.num_encoding_fn_xyz = args.num_encoding_xyz\n cfg.models.fine.num_encoding_fn_xyz = args.num_encoding_xyz\n if args.num_random_rays is not None:\n cfg.nerf.train.num_random_rays = args.num_random_rays\n if args.perturb is not None:\n cfg.nerf.train.perturb = args.perturb\n if args.raw_noise_std is not None:\n cfg.nerf.train.radiance_field_noise_std = args.raw_noise_std\n if args.samples_coarse is not None:\n cfg.nerf.train.num_coarse = args.samples_coarse\n if args.samples_fine is not None:\n cfg.nerf.train.num_fine = 
args.samples_fine\n if args.testskip is not None:\n cfg.dataset.testskip = args.testskip\n if args.use_viewdirs is not None:\n cfg.models.coarse.use_viewdirs = args.use_viewdirs\n cfg.models.fine.use_viewdirs = args.use_viewdirs\n cfg.nerf.use_viewdirs = args.use_viewdirs\n if args.white_bkgd is not None:\n cfg.nerf.train.white_background = args.white_bkgd\n cfg.nerf.validation.white_background = args.white_bkgd\n if args.width is not None:\n cfg.models.coarse.hidden_size = args.width\n cfg.models.fine.hidden_size = args.width\n if args.weak_iters is not None:\n cfg.experiment.weak_train_iters = args.weak_iters\n if args.corrective_iters is not None:\n cfg.experiment.corrective_iters = args.corrective_iters\n if args.n_stages is not None:\n cfg.experiment.n_stages = args.n_stages\n if args.boost_rate is not None:\n cfg.experiment.boost_rate = args.boost_rate\n if args.learn_boost_rate is not None:\n cfg.experiment.learn_boost_rate = args.learn_boost_rate\n if args.render_activation_fn is not None:\n cfg.experiment.render_activation_fn = args.render_activation_fn\n if args.lr_decay_corrective_peaked is not None:\n cfg.scheduler.lr_decay_corrective_peaked = args.lr_decay_corrective_peaked\n if args.no_fine is not None:\n cfg.models.no_fine = args.no_fine \n cfg.nerf.train.num_fine = 0\n cfg.nerf.validation.num_fine = 0\n if args.lr_reset_weak is not None:\n cfg.scheduler.lr_reset_weak = args.lr_reset_weak\n if args.lr_reset_corrective is not None:\n cfg.scheduler.lr_reset_corrective = args.lr_reset_corrective\n if args.hierarchical_factor is not None:\n cfg.models.coarse.hierarchical_factor = args.hierarchical_factor\n cfg.models.fine.hierarchical_factor = args.hierarchical_factor\n if args.propagate_context is not None:\n cfg.experiment.propagate_context = args.propagate_context\n \n\n return cfg, args\n\n\ndef save_checkpoint(cfg, stage, epoch, loss, model_coarse, model_fine, ensemble_coarse, ensemble_fine, optimizer, psnr):\n checkpoint_dict = {\n \"cfg\": cfg,\n \"run_id\": wandb.run.id,\n \"ensemble_stage\": stage,\n \"weak_epoch\": epoch,\n \"model_coarse_state_dict\": model_coarse.state_dict(),\n \"model_fine_state_dict\": None if not model_fine else model_fine.state_dict(),\n \"ensemble_coarse_state_dict\": ensemble_coarse.state_dict(),\n \"ensemble_fine_state_dict\": None if not ensemble_fine else ensemble_fine.state_dict(),\n \"weak_optimizer_state_dict\": optimizer.state_dict(),\n \"loss\": loss,\n \"psnr\": psnr,\n }\n\n checkpoint_path = os.path.join(cfg.experiment.logdir, wandb.run.name, \"checkpoint_stage\" + str(stage).zfill(2) + \"_epoch\" + str(epoch).zfill(5) + \".ckpt\")\n torch.save(\n checkpoint_dict,\n checkpoint_path\n )\n tqdm.write(\"================== Saved Checkpoint =================\")\n\n return checkpoint_path\n\n\ndef load_checkpoint(configargs, device, net_ensemble_coarse, net_ensemble_fine):\n checkpoint = torch.load(configargs.load_checkpoint)\n cfg = checkpoint[\"cfg\"]\n start_stage = checkpoint[\"ensemble_stage\"]\n start_weak_epoch = checkpoint[\"weak_epoch\"]\n run_id = checkpoint[\"run_id\"]\n\n # Load weak models\n weak_model_coarse = get_model_coarse(cfg, start_stage)\n weak_model_fine = get_model_fine(cfg, start_stage)\n weak_model_coarse.load_state_dict(checkpoint[\"model_coarse_state_dict\"])\n weak_model_coarse.to(device)\n if checkpoint[\"model_fine_state_dict\"]:\n weak_model_fine.load_state_dict(checkpoint[\"model_fine_state_dict\"])\n weak_model_fine.to(device)\n\n # Initialize weak optimizer\n trainable_parameters = 
list(weak_model_coarse.parameters())\n if weak_model_fine is not None:\n trainable_parameters += list(weak_model_fine.parameters())\n optimizer = getattr(torch.optim, cfg.optimizer.type)(\n trainable_parameters, lr=cfg.optimizer.lr\n )\n optimizer.load_state_dict(checkpoint[\"weak_optimizer_state_dict\"])\n\n # Load ensembles\n net_ensemble_coarse.load_state_dict(checkpoint[\"ensemble_coarse_state_dict\"], cfg, get_model_coarse)\n if checkpoint[\"ensemble_fine_state_dict\"]:\n net_ensemble_fine.load_state_dict(checkpoint[\"ensemble_fine_state_dict\"], cfg, get_model_fine)\n print(f'Loaded checkpoint: logdir={cfg.experiment.logdir} run_id={run_id} stage={start_stage}, weak_epoch={start_weak_epoch} loss={checkpoint[\"loss\"]}, psnr={checkpoint[\"psnr\"]}')\n return cfg, run_id, start_stage, start_weak_epoch, weak_model_coarse, weak_model_fine\n\n\ndef get_model_coarse(cfg, stage):\n if hasattr(cfg.experiment, \"propagate_context\") and not cfg.experiment.propagate_context:\n stage = 0\n if not hasattr(cfg.models.coarse, \"hierarchical_factor\"):\n cfg.models.coarse.hierarchical_factor = 1.0\n\n model_coarse = getattr(models, cfg.models.coarse.type)(\n num_layers=cfg.models.coarse.num_layers,\n hidden_size=int(cfg.models.coarse.hidden_size * cfg.models.coarse.hierarchical_factor**stage),\n skip_connect_every=cfg.models.coarse.skip_connect_every,\n num_encoding_fn_xyz=cfg.models.coarse.num_encoding_fn_xyz,\n num_encoding_fn_dir=cfg.models.coarse.num_encoding_fn_dir,\n include_input_xyz=cfg.models.coarse.include_input_xyz,\n include_input_dir=cfg.models.coarse.include_input_dir,\n use_viewdirs=cfg.models.coarse.use_viewdirs,\n append_penultimate=stage,\n prev_penultimate_size=int(cfg.models.coarse.hidden_size * cfg.models.coarse.hierarchical_factor**(stage-1))\n )\n return model_coarse\n\n\ndef get_model_fine(cfg, stage):\n if hasattr(cfg.experiment, \"propagate_context\") and not cfg.experiment.propagate_context:\n stage = 0\n if not hasattr(cfg.models.fine, \"hierarchical_factor\"):\n cfg.models.fine.hierarchical_factor = 1.0\n\n model_fine = None\n # if hasattr(cfg.models, \"fine\"):\n if (not hasattr(cfg.models, \"no_fine\")) or (not cfg.models.no_fine):\n model_fine = getattr(models, cfg.models.fine.type)(\n num_layers=cfg.models.fine.num_layers,\n hidden_size=int(cfg.models.fine.hidden_size * cfg.models.fine.hierarchical_factor**stage),\n skip_connect_every=cfg.models.fine.skip_connect_every,\n num_encoding_fn_xyz=cfg.models.fine.num_encoding_fn_xyz,\n num_encoding_fn_dir=cfg.models.fine.num_encoding_fn_dir,\n include_input_xyz=cfg.models.fine.include_input_xyz,\n include_input_dir=cfg.models.fine.include_input_dir,\n use_viewdirs=cfg.models.fine.use_viewdirs,\n append_penultimate=stage,\n prev_penultimate_size=int(cfg.models.fine.hidden_size * cfg.models.fine.hierarchical_factor**(stage-1))\n )\n return model_fine\n\n\ndef get_img_grid(h, w, n, images, margin=1): # from internet\n if len(images) != n:\n raise ValueError('Number of images ({}) does not match '\n 'matrix size {}x{}'.format(len(images), w, h))\n\n imgs = images\n\n if any(i.shape != imgs[0].shape for i in imgs[1:]):\n raise ValueError('Not all images have the same shape.')\n\n img_h, img_w, img_c = imgs[0].shape\n\n m_x = 0\n m_y = 0\n if margin is not None:\n m_x = int(margin)\n m_y = m_x\n\n imgmatrix = np.zeros((img_h * h + m_y * (h - 1),\n img_w * w + m_x * (w - 1),\n img_c),\n np.uint8)\n\n imgmatrix.fill(255)\n\n imgmatrix = np.zeros((img_h * h + m_y * (h - 1),\n img_w * w + m_x * (w - 1),\n img_c),\n np.uint8)\n\n 
imgmatrix.fill(255)\n\n positions = itertools.product(range(w), range(h))\n for (x_i, y_i), img in zip(positions, imgs):\n x = x_i * (img_w + m_x)\n y = y_i * (img_h + m_y)\n imgmatrix[y:y + img_h, x:x + img_w, :] = img\n\n return imgmatrix\n\n\ndef load_data(cfg):\n USE_CACHED_DATASET = False\n train_paths, validation_paths = None, None\n images, poses, render_poses, hwf, i_split = None, None, None, None, None\n H, W, focal, i_train, i_val, i_test = None, None, None, None, None, None\n if hasattr(cfg.dataset, \"cachedir\") and os.path.exists(cfg.dataset.cachedir):\n train_paths = glob.glob(os.path.join(cfg.dataset.cachedir, \"train\", \"*.data\"))\n validation_paths = glob.glob(\n os.path.join(cfg.dataset.cachedir, \"val\", \"*.data\")\n )\n USE_CACHED_DATASET = True\n print(\"Found cache.\")\n else:\n # Load dataset\n print(\"No cache found or set, loading dataset...\")\n images, poses, render_poses, hwf = None, None, None, None\n if cfg.dataset.type.lower() == \"blender\":\n images, poses, render_poses, hwf, i_split = load_blender_data(\n cfg.dataset.basedir,\n half_res=cfg.dataset.half_res,\n testskip=cfg.dataset.testskip,\n )\n i_train, i_val, i_test = i_split # select data indices for training, validation, and testing\n H, W, focal = hwf\n H, W = int(H), int(W)\n hwf = [H, W, focal]\n if cfg.nerf.train.white_background:\n images = images[..., :3] * images[..., -1:] + (1.0 - images[..., -1:])\n elif cfg.dataset.type.lower() == \"llff\":\n images, poses, bds, render_poses, i_test = load_llff_data(\n cfg.dataset.basedir, factor=cfg.dataset.downsample_factor\n )\n hwf = poses[0, :3, -1]\n poses = poses[:, :3, :4]\n if not isinstance(i_test, list):\n i_test = [i_test]\n if cfg.dataset.llffhold > 0:\n i_test = np.arange(images.shape[0])[:: cfg.dataset.llffhold]\n i_val = i_test\n i_train = np.array(\n [\n i\n for i in np.arange(images.shape[0])\n if (i not in i_test and i not in i_val)\n ]\n )\n H, W, focal = hwf\n H, W = int(H), int(W)\n hwf = [H, W, focal]\n images = torch.from_numpy(images)\n poses = torch.from_numpy(poses)\n\n data_dict = {\n \"i_train\": i_train,\n \"i_val\": i_val,\n \"images\": images,\n \"poses\": poses,\n \"train_paths\": train_paths,\n \"validation_paths\": validation_paths\n }\n\n return hwf, USE_CACHED_DATASET, data_dict\n\n\ndef cast_to_image(tensor):\n # Input tensor is (H, W, 3). 
Convert to (3, H, W).\n tensor = tensor.permute(2, 0, 1)\n # Convert to PIL Image and then np.array (output shape: (H, W, 3))\n img = np.array(torchvision.transforms.ToPILImage()(tensor.detach().cpu()))\n # Map back to shape (3, H, W), as tensorboard needs channels first.\n img = np.moveaxis(img, [-1], [0])\n return img\n\n\ndef get_random_rays(cfg, hwf, USE_CACHED_DATASET, device, data_dict):\n \"\"\"\n Collect a bunch of random rays (default:1024) with ground truth from a single random image.\\n\n Rays are defined by `ray_origins[]` and `ray_directions[]`.\n\n :param nerf.cfgnode.CfgNode cfg: dictionary with all config parameters\n :param list[int,int,float] hwf: list containing image height, width, and cameras focal length\n :param bool USE_CACHED_DATASET: cache folder must exist for it to have an effect\n :param str device: usually \"cuda\" or \"cpu\"\n :param data_dict: contains i_train, images, poses and train_paths\n - None | list[int] i_train: indices of data images used for training, ignored if using cached dataset\n - None | torch.Tensor images: loaded dataset [H,W,N,RGB], ignored if using cached dataset\n - None | torch.Tensor poses: camera parameters, one camera for each image. Ignored if using cached dataset\n - list[str] | None train_paths: one path for each training image, ignored if dataset is NOT cached\n :return: hwf, ray_directions, ray_origins, target_ray_values\n :rtype: tuple[ list[int,int,float], torch.Tensor, torch.Tensor, torch.Tensor]\n \"\"\"\n\n if USE_CACHED_DATASET:\n datafile = np.random.choice(data_dict[\"train_paths\"])\n cache_dict = torch.load(datafile)\n H = cache_dict[\"height\"]\n W = cache_dict[\"width\"]\n focal = cache_dict[\"focal_length\"]\n if not hwf:\n wandb.config.HWF = [H, W, focal]\n hwf = H, W, focal\n ray_bundle = cache_dict[\"ray_bundle\"].to(device)\n ray_origins, ray_directions = (\n ray_bundle[0].reshape((-1, 3)),\n ray_bundle[1].reshape((-1, 3)),\n )\n target_ray_values = cache_dict[\"target\"][..., :3].reshape((-1, 3))\n select_inds = np.random.choice(\n ray_origins.shape[0],\n size=cfg.nerf.train.num_random_rays,\n replace=False,\n )\n ray_origins, ray_directions = (\n ray_origins[select_inds],\n ray_directions[select_inds],\n )\n target_ray_values = target_ray_values[select_inds].to(device)\n # ray_bundle = torch.stack([ray_origins, ray_directions], dim=0).to(device)\n else:\n H, W, focal = hwf\n img_idx = np.random.choice(data_dict[\"i_train\"])\n img_target = data_dict[\"images\"][img_idx].to(device)\n pose_target = data_dict[\"poses\"][img_idx, :3, :4].to(device)\n ray_origins, ray_directions = get_ray_bundle(H, W, focal, pose_target)\n coords = torch.stack(\n meshgrid_xy(torch.arange(H).to(device), torch.arange(W).to(device)),\n dim=-1,\n )\n coords = coords.reshape((-1, 2))\n select_inds = np.random.choice(\n coords.shape[0], size=cfg.nerf.train.num_random_rays, replace=False\n )\n select_inds = coords[select_inds]\n ray_origins = ray_origins[select_inds[:, 0], select_inds[:, 1], :]\n ray_directions = ray_directions[select_inds[:, 0], select_inds[:, 1], :]\n # batch_rays = torch.stack([ray_origins, ray_directions], dim=0)\n target_s = img_target[select_inds[:, 0], select_inds[:, 1], :]\n target_ray_values = target_s\n\n return hwf, ray_directions, ray_origins, target_ray_values\n","repo_name":"AndreiDiaconu97/GrowNeRF","sub_path":"train_nerf_utils.py","file_name":"train_nerf_utils.py","file_ext":"py","file_size_in_byte":25850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70092901287","text":"\"\"\"\ninterface graphique pour projet 12 jours\n\nIl contient les composants de la spécification.\n\"\"\"\n# declaration des paquets\n\nimport tkinter as tk # nécessaire pour interface graphique\nfrom tkinter import scrolledtext as stext\nfrom tkinter import filedialog as FD\nimport os # pour manipulation des chemins\n\n# fin declaration des paquets\nclass MainWindow:\n def __init__(self, master):\n # declaration de cadre principal\n self.main_frame = tk.Frame(master=master)\n self.main_frame.pack(fill=tk.BOTH, expand=1)\n\n # les elements de cadre principal\n self.label_titre = tk.Label(\n master=self.main_frame, text=\"Application 12 jours\", font=(\"FreeSerif\", 16)\n )\n self.label_titre.pack(side=tk.TOP, fill=tk.X, expand=0)\n self.sub_frame = tk.Frame(master=self.main_frame)\n self.sub_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n # fin des elements de cadre principal\n\n # elements de sub_frame\n self.zone_affichage_text = stext.ScrolledText(master=self.sub_frame)\n self.zone_affichage_text.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)\n self.tool_frame = tk.Frame(master=self.sub_frame)\n self.tool_frame.pack(side=tk.LEFT, fill=tk.Y, expand=1)\n # fin des elements de sub_frame\n\n # elements de liste\n self.list_frame = tk.Frame(master=self.tool_frame)\n self.list_frame.pack(side=tk.TOP, fill=tk.Y, expand=1)\n\n self.cbar = tk.Scrollbar(master=self.list_frame)\n self.cbar.pack(side=tk.RIGHT, fill=tk.Y, expand=1)\n\n self.liste_chemin = tk.Listbox(\n master=self.list_frame, yscrollcommand=self.cbar.set\n )\n self.liste_chemin.pack(side=tk.LEFT, fill=tk.Y, expand=1)\n for i in range(0, 200):\n self.liste_chemin.insert(tk.END, \"chemin \" + str(i))\n #\n self.cbar.config(command=self.liste_chemin.yview)\n\n # fin des elements de liste\n # cadre des boutons\n self.button_frame = tk.Frame(master=self.tool_frame)\n self.button_frame.pack(side=tk.TOP, fill=tk.X, expand=1)\n\n self.charge_btn = tk.Button(master=self.button_frame, \n text=\"Charger\", command=self.charger_fichiers)\n self.charge_btn.pack(side=tk.TOP, fill=tk.X, expand=1)\n\n self.ouvrir_btn = tk.Button(master=self.button_frame, \n text=\"Ouvrir\", command=self.ouvrir_text)\n self.ouvrir_btn.pack(side=tk.TOP, fill=tk.X, expand=1)\n\n self.supprimer_btn = tk.Button(master=self.button_frame, \n text=\"Supprimer\", command=self.supprimer_fichiers)\n self.supprimer_btn.pack(side=tk.TOP, fill=tk.X, expand=1)\n # fin de cadre des boutons\n\n # cadre de listes des mots cles\n self.mot_frame = tk.Frame(master=self.tool_frame)\n self.mot_frame.pack(side=tk.TOP, fill=tk.Y, expand=1)\n\n self.mbar = tk.Scrollbar(master=self.mot_frame)\n self.mbar.pack(side=tk.RIGHT, fill=tk.Y, expand=1)\n\n self.mbox = tk.Listbox(master=self.mot_frame, yscrollcommand=self.mbar.set)\n self.mbox.pack(side=tk.LEFT, fill=tk.Y, expand=1)\n for m in range(10, 120):\n self.mbox.insert(tk.END, \"mot \" + str(m))\n #\n self.mbar.config(command=self.mbox.yview)\n # fin cadre de liste des mots cles\n\n # cadre d'entre \n self.entre_frame = tk.Frame(master=self.tool_frame)\n self.entre_frame.pack(side=tk.TOP, fill=tk.X, expand=1)\n\n self.entre = tk.Entry(master=self.entre_frame)\n self.entre.pack(side=tk.TOP, fill=tk.X, expand=1)\n self.entre.delete(0, tk.END)\n self.entre.insert(0, \"valeur par défaut\")\n\n # fin cadre d'entre\n # cadre des boutons des mots\n self.mot_button_frame = tk.Frame(master=self.tool_frame)\n self.mot_button_frame.pack(side=tk.TOP, fill=tk.X, expand=1)\n\n self.ajouter_btn = 
tk.Button(master=self.mot_button_frame, \n text=\"Ajouter\", command=self.ajouter_mots_cles)\n self.ajouter_btn.pack(side=tk.TOP, fill=tk.X, expand=1)\n\n self.mot_supprimer_btn = tk.Button(master=self.mot_button_frame, \n text=\"Supprimer\", command=self.supprimer_mot_cles)\n self.mot_supprimer_btn.pack(side=tk.TOP, fill=tk.X, expand=1)\n\n self.filtrer_btn = tk.Button(master=self.mot_button_frame,\n text=\"Filtrer\", command=self.filtrage_par_mot_cle)\n self.filtrer_btn.pack(side=tk.TOP, fill=tk.X, expand=1)\n self.chemins_de_text = []\n self.dirname = \"\"\n self.liste_de_chemin = []\n\n def charger_fichiers(self):\n \"charge les chemins à la liste\"\n chemins = FD.askopenfilenames()\n if not chemins:\n return\n self.chemins_de_text = list(chemins)\n self.liste_chemin.delete(0, tk.END)\n for chemin in self.chemins_de_text:\n name = os.path.basename(chemin)\n self.dirname = os.path.dirname(chemin)\n self.liste_chemin.insert(tk.END, name)\n return\n\n def supprimer_fichiers(self):\n \"supprime les chemins de la liste et chemin_de_text\"\n chemin = self.liste_chemin.curselection()\n indice_de_selection = chemin[0]\n name = self.liste_chemin.get(indice_de_selection)\n chemins = []\n supprime_chemin = os.path.join(self.dirname, name)\n print(\"chemin à supprimer:\", supprime_chemin)\n print(\"chemins\", str(self.chemins_de_text))\n for chem in self.chemins_de_text:\n if supprime_chemin != chem:\n chemins.append(chem)\n self.chemins_de_text = chemins\n print(\"nouveaux chemins:\", str(self.chemins_de_text))\n self.liste_chemin.delete(indice_de_selection)\n return\n \n def ouvrir_text(self):\n \"Ouvre le texte et visualise dans la zone de texte\"\n selection = self.liste_chemin.curselection()\n indice_de_selection = selection[0]\n name = self.liste_chemin.get(indice_de_selection)\n abschemin = os.path.join(self.dirname, name)\n with open(abschemin, \"r\", encoding='utf-8') as f:\n text = f.read()\n self.zone_affichage_text.delete(1.0, tk.END)\n self.zone_affichage_text.insert(tk.END, text)\n\n def ajouter_mots_cles(self):\n \"Ajoute des mots clés sais à la liste\"\n mot_actuel = self.entre.get()\n if mot_actuel and mot_actuel != \"valeur par défaut\":\n self.mbox.insert(tk.END, mot_actuel)\n return\n\n def supprimer_mot_cles(self):\n \"supprime des mots cles de la liste des mots cles\"\n selection = self.mbox.curselection()\n for indice in selection:\n self.mbox.delete(indice)\n \n def lire_contenu_texte(self, chemin: str) -> str:\n \"Lit le contenu du texte\"\n with open(chemin, 'r', encoding='utf-8') as f:\n texte = f.read()\n return texte\n\n def controle_de_texte(self, texte: str, mot_cles: str) -> bool:\n \"Controle si le mot cles existe dans le texte\"\n return bool(mot_cles in texte)\n\n def garder(self, chemin: str, resultat_de_control: bool) -> None:\n \"garde le chemin si resultat de control est vrai\" \n if resultat_de_control:\n self.liste_de_chemin.append(chemin)\n return\n\n def affichage(self):\n \"\"\n self.liste_chemin.delete(0, tk.END)\n for chemin in self.liste_de_chemin:\n name = os.path.basename(chemin)\n self.liste_chemin.insert(tk.END, name)\n return\n\n def filtrage_par_mot_cle(self):\n \"Filtre la liste des chemins par mot cles\"\n selection = self.mbox.curselection()\n indice = selection[0]\n mot = self.mbox.get(indice)\n for chemin in self.chemins_de_text:\n contenu = self.lire_contenu_texte(chemin)\n control = self.controle_de_texte(contenu, mot)\n self.garder(chemin, control)\n self.affichage()\n return\n \n\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n 
monapp = MainWindow(root)\n root.mainloop()\n","repo_name":"D-K-E/python-12-jours-appli","sub_path":"appli/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8035,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14356846009","text":"class Solution:\n # @param A a list of integers\n # @return nothing, sort in place\n def sortColors(self, A):\n red_count = 0\n white_count = 0\n \n for item in A:\n if item == 0:\n red_count += 1\n elif item == 1:\n white_count += 1\n \n for i in range(red_count):\n A[i] = 0\n for i in range(red_count, red_count + white_count):\n A[i] = 1\n for i in range(red_count + white_count, len(A)):\n A[i] = 2","repo_name":"eddiesong/LeetCode","sub_path":"sort_color.py","file_name":"sort_color.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24951338482","text":"import cv2\nimport face_recognition\nimport os\n\nstored_faces = []\nfaces_names = []\n\n\ndef recognize_face(encoding_img): # If the face in system, it gives name\n match = face_recognition.compare_faces(stored_faces, encoding_img)\n try:\n return faces_names[match.index(True)]\n except ValueError:\n return None\n\n\ndef add_face(): # Add face to the system by using cam\n camera = cv2.VideoCapture(0)\n i = 0\n while i < 10:\n ret, frame = camera.read()\n face_locations = face_recognition.face_locations(frame)\n i = i + 1 # In the first frame camera sometimes doesn't detect someone\n if i == 10 and len(face_locations) > 1:\n print(\"Too much faces\")\n elif i == 10 and len(face_locations) < 1:\n print(\"There are no faces\")\n elif len(face_locations) == 1:\n face = face_locations[0]\n cv2.rectangle(frame, (face[3], face[0]), (face[1], face[2]), (255, 0, 0), 2)\n cv2.imshow(\"Result\", frame)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n name = input(\"Please enter the name: \")\n newimg = frame[face[0]:face[2], face[3]: face[1]]\n cv2.imwrite(\"known_faces/\"+str(name)+\".jpg\", newimg)\n cv2.destroyAllWindows()\n camera.release()\n break\n\n\ndef complete_faces(): # Get all faces from filesystem and be ready to recognize faces\n images = os.listdir(\"known_faces\")\n names = [x[:x.index(\".\")] for x in images]\n for image, name in zip(images, names):\n read_img = cv2.imread(\"known_faces/\"+image)\n new_face = face_recognition.face_encodings(read_img)[0]\n stored_faces.append(new_face)\n faces_names.append(name)\n\n\nif __name__ == \"face_recognizer\":\n complete_faces()\nelif __name__ == \"__main__\":\n add_face()\n","repo_name":"HghaVlad/face_recognizer","sub_path":"face_recognizer.py","file_name":"face_recognizer.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73168106728","text":"play = input ('Do you want to play? ')\n\nif play != 'yes':\n quit()\n\nprint('Welcome! Lets play a Random number Guessing Game ')\n\nguess = input ('Write a number you want to guess between ')\nif guess.isdigit():\n guess = int(guess)\n\n if guess <= 0:\n print('Please type a number larger than 0 next time.')\n elif guess > 0:\n print('Guess a Number between 0 and',guess,'Good Luck!')\n else:\n print('Please type a number next time.')\n quit()\n\nimport random\nr = random.randrange(0, guess)\nguesses = 0\n\nwhile True:\n guesses += 1\n answer = input('Guess a number ')\n if answer.isdigit():\n answer = int(answer)\n else:\n print('Please type in a number ')\n continue\n\n if answer == r:\n print('You got it! ')\n break\n else:\n if answer > r:\n print('You were above the number! ')\n else:\n print('You were below the number! ')\n\nprint('You got it in',guesses, \"guesses\")\n","repo_name":"Mounza/Python","sub_path":"selfProjects/randomNumber.py","file_name":"randomNumber.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20800700702","text":"#!/usr/bin/env python3\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.metrics import confusion_matrix\n\nimport numpy as np\nimport itertools\nimport matplotlib.pyplot as plt\nimport pandas\nimport sys\n\n\ndef get_all_data():\n with open(sys.argv[1], \"r\", encoding='UTF-8') as text_file:\n #with open(\"output.csv\", \"r\") as text_file:\n data = text_file.read().split('\\n')\n #remove the csv header\n data.pop(0)\n return data\n\n\ndef preprocessing_data(data):\n processing_data = []\n for single_data in data:\n splitData = single_data.split(\"^\")\n if len(splitData) > 10:\n #processing_data.append(splitData)\n tempData = [splitData[12],splitData[13]]\n #add only those which have a score\n if splitData[13] != '':\n processing_data.append(tempData)\n\n return processing_data\n\ndef split_data(data):\n total = len(data)\n training_ratio = 0.75\n training_data = []\n evaluation_data = []\n\n for indice in range(0, total):\n if indice < total * training_ratio:\n training_data.append(data[indice])\n else:\n evaluation_data.append(data[indice])\n\n return training_data, evaluation_data\n\ndef preprocessing_step():\n data = get_all_data()\n processing_data = preprocessing_data(data)\n\n return split_data(processing_data)\n\ndef training_step(data, vectorizer):\n training_text = [data[0] for data in data]\n training_result = [data[1] for data in data]\n\n training_text = vectorizer.fit_transform(training_text)\n\n return BernoulliNB().fit(training_text, training_result)\n\n\ndef analyse_text(classifier, vectorizer, text):\n return text, classifier.predict(vectorizer.transform([text]))\n\n\ndef simple_evaluation(evaluation_data):\n evaluation_text = [evaluation_data[0] for evaluation_data in evaluation_data]\n evaluation_result = [evaluation_data[1] for evaluation_data in evaluation_data]\n\n total = len(evaluation_text)\n corrects = 0\n for index in range(0, total):\n analysis_result = analyse_text(classifier, vectorizer, evaluation_text[index])\n text, result = analysis_result\n corrects += 1 if result[0] == evaluation_result[index] else 0\n return corrects * 100 / total\n\n\ntraining_data, evaluation_data = preprocessing_step()\nvectorizer = CountVectorizer(binary = 'true')\nclassifier = training_step(training_data, vectorizer)\n\nprint(simple_evaluation(evaluation_data))\n\n\n#print(preprocessing_step())\n\n\n\n# podatki = preprocessing_data(get_all_data())\n# print(podatki)\n# for k in podatki:\n# print(k)\n\n","repo_name":"pitastrudl/sentAn","sub_path":"SentAnaWithBayes.py","file_name":"SentAnaWithBayes.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11077943139","text":"__title__ = \"jobsub_lite\"\n__summary__ = \"The local HTCondor job submission software for Fermilab users to submit jobs to local FermiGrid resources and to the Open Science Grid.\"\n__uri__ = \"https://fifewiki.fnal.gov/wiki/Jobsub_Lite\"\n\n__version__ = \"1.4.1\"\n__email__ = \"jobsub-support@fnal.gov\"\n\n__license__ = \"Apache License, Version 2.0\"\n__author__ = \"Fermi National Accelerator Laboratory\"\n__copyright__ = \"2023 %s\" % __author__\n\n\ndef print_version() -> None:\n print(f\"{__title__} version {__version__}\")\n exit()\n\n\ndef print_support_email() -> None:\n print(f\"Email {__email__} for help.\")\n exit()\n","repo_name":"fermitools/jobsub_lite","sub_path":"lib/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"37409177606","text":"# imports\nimport socket\nimport sys\nimport os\nimport datetime\n\n# function: send tcp message to target\ndef send(ip, port, mesg, outfile):\n\n\t# get timestamp\n\tct = datetime.datetime.now()\n\n\t# Create a TCP/IP socket\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\t# Connect the socket to the port where the server is listening\n\ttarget = (ip, port)\n\n\t#print(f'Connecting to port {port} at {ip}')\n\tsock.connect(target)\n\n\t# send data\n\ttry:\n\n\t\t# clear the screen\n\t\tos.system('clear')\n\n\t\t# print and write to file\n\t\tprint(f'Sending message: {mesg}') \n\t\toutfile.write(f'Sending to IP:{ip}, port:{str(port)}, time:{ct}\\n')\n\t\toutfile.write(f'Sending message: {mesg}\\n')\n\n\t\t# encode the message as byte string\n\t\tmesg = mesg.encode()\n\n\t\t# send the message\n\t\tsock.sendall(mesg)\n\n\t\t# receive response\n\t\tdata = sock.recv(1024)\n\n\t\t# print and write response to file\n\t\tprint(f'Received response: {data}')\n\t\toutfile.write(f'Received response: {data}\\n')\n\n\t# close socket\n\tfinally:\n\t\toutfile.write('\\n')\n\t\tsock.close()\n\n# main function\ndef main(argv):\n\n\t# error checking input\n\tif len(argv) != 4:\n\t\tprint(\"Usage: '\n resources = r.need(library_name, resource_id, resource_type)\n resources_to_include = []\n for resource in resources:\n if not request.activeResources.resource_in_request(\n library_name, resource[\"resourceID\"], resource_type\n ):\n request.activeResources.add_resource(\n library_name, resource[\"resourceID\"], resource_type\n )\n resources_to_include.append(\n jinjaEnv.from_string(html).render(\n file=request.application_url + \"/\" + resource[\"filePath\"]\n )\n )\n return literal(\"\\n\".join(resources_to_include))\n else:\n return \"\"\n\n\nclass ExtendThis(ext.Extension): # pragma: no cover\n \"\"\"\n This class implements the extend_me tag. 
Not include in Coverage because Coverage cannot detect its call\n \"\"\"\n\n tags = [\"extend_me\"]\n\n def __init__(self, environment):\n ext.Extension.__init__(self, environment)\n try:\n self.searchpath = jinjaEnv.loader.searchpath[:]\n except AttributeError:\n # this isn't available on message extraction\n pass\n\n def parse(self, parser):\n lineno = next(parser.stream).lineno\n node = nodes.Extends(lineno)\n template_file = parser.filename\n template_path = parser.filename\n\n # We need to have a list of template paths to look for\n if not hasattr(self, \"searchpath\"):\n return node\n\n # First we remove the templates path from the file\n # so to have the just the template file or a template file in a subdirectory of templates\n for searchpath in self.searchpath:\n template_file = template_file.replace(searchpath, \"\")\n\n # Here we get the template path of the file\n template_path = template_path.replace(template_file, \"\")\n\n # Find the position of the template's path in the list of paths\n index = -1\n try:\n index = self.searchpath.index(template_path)\n except ValueError:\n pass\n if index == -1:\n return node\n\n # index is the position of the this template's path\n # so we search down stream for the template in other paths\n file_to_extend = \"\"\n for pos in range(index + 1, len(self.searchpath)):\n if os.path.exists(self.searchpath[pos] + template_file):\n file_to_extend = self.searchpath[pos] + template_file\n break\n\n # If the file to extend from exits then set it as a template\n if file_to_extend == \"\":\n return node\n else:\n node.template = nodes.Const(file_to_extend)\n\n return node\n\n\nclass BaseExtension(ext.Extension): # pragma: no cover\n \"\"\"\n Base class for creating custom jinja2 tags.\n parse expects a tag of the format\n {% tag_name args, kw %}\n after parsing it will call _call(args, kw) which must be defined.\n\n This code is based on CKAN\n :Copyright: (C) 2007 Open Knowledge Foundation\n :license: AGPL V3, see LICENSE for more details.\n \"\"\"\n\n def parse(self, parser):\n stream = parser.stream\n tag = next(stream)\n # get arguments\n args = []\n kwargs = []\n while not stream.current.test_any(\"block_end\"):\n if args or kwargs:\n stream.expect(\"comma\")\n if stream.current.test(\"name\") and stream.look().test(\"assign\"):\n key = nodes.Const(next(stream).value)\n stream.skip()\n value = parser.parse_expression()\n kwargs.append(nodes.Pair(key, value, lineno=key.lineno))\n else:\n args.append(parser.parse_expression())\n\n def make_call_node(*kw):\n return self.call_method(\n \"_call\", args=[nodes.List(args), nodes.Dict(kwargs)], kwargs=kw\n )\n\n return nodes.Output([make_call_node()]).set_lineno(tag.lineno)\n\n\nclass JSResourceExtension(BaseExtension): # pragma: no cover\n tags = [\"jsresource\"]\n\n @classmethod\n def _call(cls, args, kwargs):\n assert len(args) == 3\n assert len(kwargs) == 0\n return render_resource(args[0], args[1], \"JS\", args[2])\n\n\nclass CSSResourceExtension(BaseExtension): # pragma: no cover\n tags = [\"cssresource\"]\n\n @classmethod\n def _call(cls, args, kwargs):\n assert len(args) == 3\n assert len(kwargs) == 0\n return render_resource(args[0], args[1], \"CSS\", args[2])\n\n\ndef regularise_html(html): # pragma: no cover\n \"\"\"\n Take badly formatted html with strings\n\n\n This code is based on CKAN\n :Copyright (C) 2007 Open Knowledge Foundation\n :license: AGPL V3, see LICENSE for more details.\n :param html: The html to be formated\n :return: Formated html\n \"\"\"\n\n if html is None:\n return\n html 
= re.sub(\"\\n\", \" \", html)\n matches = re.findall(\"(<[^>]*>|%[^%]\\([^)]*\\)\\w|[^<%]+|%)\", html)\n for i in range(len(matches)):\n match = matches[i]\n if match.startswith(\"<\") or match.startswith(\"%\"):\n continue\n matches[i] = re.sub(\"\\s{2,}\", \" \", match)\n html = \"\".join(matches)\n return html\n","repo_name":"qlands/FormShare","sub_path":"formshare/config/jinja_extensions.py","file_name":"jinja_extensions.py","file_ext":"py","file_size_in_byte":6416,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"}
+{"seq_id":"31408435937","text":"import PySimpleGUI as sg\r\nfrom PIL import Image,ImageTk,ImageDraw,ImageFont\r\n\r\nsg.theme('BlueMono')\r\nlayout_1 =[ \r\n [sg.FileBrowse('Load', enable_events=True,size=(10, 1),tooltip=\"Helps you to load a new image every time.\",key='browse')],\r\n [sg.Image(filename=\"\",size=(400,400), key='Imag')], \r\n [sg.Text(\"Enter width:\",size=(9, 1)),sg.Input(key='wid_input'),sg.Text(\"Enter Degrees of rotation:\",size=(20, 1)),sg.Input(key='dor_input', size=(7,1)),\r\n sg.Text(\"Enter your text here:\",size=(15, 1)),sg.Input(key='text', size=(20,10))\r\n ],\r\n [sg.Text(\"Enter height:\",size=(9, 1)),sg.Input(key='heig_input'),sg.Text(\"\",size=(20, 1)),sg.Button(\"Rotate\",key = \"rot\"),\r\n sg.Text(\"\",size=(16, 1)),sg.Button(\"Apply Text\",key = \"applytxt\")\r\n ],\r\n [sg.Button(\"Resize\",key = \"rs\"),sg.Text(\"\",size=(20, 1)),sg.Button(\"Transpose\",key = \"transpose\")],\r\n [sg.Text(\"\",size=(55, 1)),sg.Button(\"SAVE\",size =(20,1),key = \"save\",tooltip=\"Save your final Image\")]\r\n ]\r\nwindow = sg.Window('Image Editor', layout_1)\r\nfilename = ''\r\nwhile True:\r\n event, values = window.read()\r\n print(event, values) \r\n if event == sg.WINDOW_CLOSED:\r\n break\r\n elif event == 'browse' and filename != values['browse']: \r\n filename = values['browse']\r\n img = Image.open(values['browse']).resize((600,600))\r\n width, height = img.size \r\n window['Imag'].Update(data= ImageTk.PhotoImage(img)) \r\n img\r\n\r\n else: \r\n if event: \r\n if event == 'rs':\r\n if values['wid_input'] != '' and values['heig_input'] != '': \r\n width, height = img.size\r\n img = img.resize((int(values['wid_input']),int(values['heig_input']))) \r\n window['Imag'].Update(data= ImageTk.PhotoImage(img)) \r\n else:\r\n sg.popup('Please Enter both Width and Height values') \r\n \r\n elif event == 'rot':\r\n if values['dor_input'] != '': \r\n img = img.rotate((int(values['dor_input']))) \r\n window['Imag'].Update(data= ImageTk.PhotoImage(img)) \r\n else:\r\n sg.popup('Please Enter degree of rotation') \r\n \r\n elif event == 'transpose':\r\n if img: \r\n img = img.transpose(Image.FLIP_LEFT_RIGHT) \r\n window['Imag'].Update(data= ImageTk.PhotoImage(img)) \r\n else:\r\n sg.popup('Please Enter degree of rotation')\r\n\r\n elif event == 'applytxt':\r\n draw = ImageDraw.Draw(img)\r\n myFont = ImageFont.truetype('C:/ProgramData/Anaconda3/pkgs/werkzeug-1.0.1-py_0/site-packages/werkzeug/debug/shared/ubuntu.ttf', 40)\r\n draw.text((0, 0), values['text'], font=myFont, fill =(300, 0, 0))\r\n window['Imag'].Update(data= ImageTk.PhotoImage(img)) \r\n\r\n elif event == \"save\":\r\n img \r\n img.save(\"Edited_Image.PNG\")\r\n sg.popup('Thankyou for saving the image')\r\n break\r\n\r\n else:\r\n break\r\n \r\nwindow.close()","repo_name":"karteekvarma/Python","sub_path":"Assignmnet.py","file_name":"Assignmnet.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39150390402","text":"import os\n\nfrom dotenv import find_dotenv, load_dotenv\nfrom pymongo import MongoClient\n\n\nclass MongoDBConnection:\n __instance = None\n\n @staticmethod\n def getInstance():\n \"\"\" Static access method. \"\"\"\n if MongoDBConnection.__instance == None:\n MongoDBConnection()\n return MongoDBConnection.__instance\n\n def __init__(self):\n \"\"\" Virtually private constructor. \"\"\"\n if MongoDBConnection.__instance != None:\n raise Exception(\"This class is a singleton!\")\n else:\n # Replace the values with your actual MongoDB connection string and database name\n load_dotenv(find_dotenv())\n # Replace the values with your actual MongoDB connection string and database name\n passwd = os.environ.get('MONGO_PWD')\n self.db_name = os.environ.get('MONGO_DB')\n self.username = os.environ.get('MONGO_USER')\n self.host = os.environ.get('MONGO_HOST')\n self.port = os.environ.get('MONGO_PORT')\n self.auth_src = os.environ.get('MONGO_AUTH_SOURCE')\n os_env_uri = os.environ.get('MONGO_URI')\n if os_env_uri:\n self.CONNECTION_STRING = os_env_uri\n else:\n self.CONNECTION_STRING = f\"mongodb://admin:admin@localhost:27019/fbMarketplaceScrapper?authSource=admin\"\n \n\n self.client = MongoClient(self.CONNECTION_STRING)\n self.db = self.client[self.db_name]\n MongoDBConnection.__instance = self\n\n def getDatabase(self):\n return self.db\n","repo_name":"gjustoo/FM_scrapper","sub_path":"db/MongoDBConnection.py","file_name":"MongoDBConnection.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"1877806996","text":"from elasticsearch import Elasticsearch\nfrom typing import List\n\nINDEX = 'courses'\n\n\ndef create_es_instance() -> Elasticsearch:\n es = Elasticsearch(hosts=[f\"http://206.189.56.21:9200\"])\n return es\n\n\ndef simple_query(es: Elasticsearch, query_input: List[str]):\n query = {\n 'query': {\n 'query_string': {\n 'query': \" OR \".join([f\"({q})\" for q in query_input]),\n \"fields\": [\"course\"]\n }\n },\n \"highlight\": {\n \"fields\": {\n \"course\": {}\n }\n }, # highlight here\n }\n\n results = es.search(index=INDEX, body=query)\n return results[\"hits\"][\"hits\"]\n","repo_name":"JustCheckingHow/whistle-boar","sub_path":"src/es_utils.py","file_name":"es_utils.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8305354759","text":"from django.http import HttpRequest\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .models import Weapon\nfrom .serializers import WeaponSerializer\nfrom character.models import *\n\n# Create your views here.\n@api_view([\"GET\"])\ndef get_all_weapons(request: HttpRequest):\n weapons = Weapon.objects.all()\n serializer = WeaponSerializer(weapons, many=True)\n\n return Response(serializer.data)\n\n\n@api_view([\"GET\", \"POST\"])\ndef create_weapon(request: HttpRequest):\n data = request.data\n serializer = WeaponSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({\"Success\": \"The weapon was created successfully\"}, status=201)\n else:\n return Response(serializer.errors, status=400)\n\n\n@api_view([\"GET\"])\ndef get_weapon(request: HttpRequest, weapon_id: int):\n try:\n weapon = Weapon.objects.get(id=weapon_id)\n serializer = WeaponSerializer(weapon)\n return Response(serializer.data)\n except Weapon.DoesNotExist:\n return Response({\"Error\": \"The weapon does not exist\"}, status=404)\n\n\n@api_view([\"GET\"])\ndef get_weapon_by_name(request: HttpRequest):\n weapon_name = request.data.get(\"weapon_name\")\n try:\n weapon = Weapon.objects.get(name=weapon_name)\n serializer = WeaponSerializer(weapon)\n return Response(serializer.data)\n except Weapon.DoesNotExist:\n return Response({\"Error\": \"The weapon does not exist\"}, status=404)\n\n\n@api_view([\"DELETE\"])\ndef delete_weapon(request: HttpRequest, weapon_id: int):\n try:\n weapon = Weapon.objects.get(id=weapon_id)\n weapon.delete()\n return Response({\"Success\": \"The weapon was deleted successfully\"}, status=200)\n except Weapon.DoesNotExist:\n return Response({\"Error\": \"The weapon does not exist\"}, status=404)\n\n\n@api_view([\"POST\"])\ndef add_weapon(request: HttpRequest):\n character_id = request.data.get(\"character_id\")\n weapon_id = request.data.get(\"weapon_id\")\n quantity = request.data.get(\"quantity\", 1)\n\n try:\n character = Character.objects.get(id=character_id)\n except Character.DoesNotExist:\n return Response({\"Error\": \"The character does not exist\"}, status=404)\n\n try:\n weapon = Weapon.objects.get(id=weapon_id)\n except Weapon.DoesNotExist:\n return Response({\"Error\": \"The weapon does not exist\"}, status=404)\n\n weapon_quantity, created = WeaponQuantity.objects.get_or_create(\n character=character, weapon=weapon, defaults={\"quantity\": quantity}\n )\n if not created:\n weapon_quantity.quantity += quantity\n weapon_quantity.save()\n\n return Response(\n {\"Success\": \"The weapon was added to the character successfully\"}, status=201\n )\n\n\n@api_view([\"POST\"])\ndef remove_weapon(request: HttpRequest):\n character_id = request.data.get(\"character_id\")\n weapon_id = request.data.get(\"weapon_id\")\n\n try:\n character = Character.objects.get(id=character_id)\n except Character.DoesNotExist:\n return Response({\"Error\": \"The character does not exist\"}, status=404)\n\n try:\n weapon = Weapon.objects.get(id=weapon_id)\n except Weapon.DoesNotExist:\n return Response({\"Error\": \"The weapon does not exist\"}, status=404)\n\n try:\n weapon_quantity = WeaponQuantity.objects.get(character=character, weapon=weapon)\n weapon_quantity.delete()\n except WeaponQuantity.DoesNotExist:\n return Response(\n {\"Error\": \"The weapon is not associated with the character\"}, status=404\n )\n\n return Response(\n {\"Success\": \"The weapon was removed from the character 
successfully\"},\n status=200,\n )\n\n\n@api_view([\"POST\"])\ndef buy_weapon(request: HttpRequest):\n discord_id = request.data.get(\"discord_id\")\n character_name = request.data.get(\"character_name\")\n weapon_name = request.data.get(\"weapon_name\")\n quantity = request.data.get(\"quantity\", 1)\n\n try:\n character = Character.objects.get(name=character_name, discordID=discord_id)\n except Character.DoesNotExist:\n return Response({\"Error\": \"The character does not exist\"}, status=404)\n\n try:\n weapon = Weapon.objects.get(name=weapon_name)\n except Weapon.DoesNotExist:\n return Response({\"Error\": \"The weapon does not exist\"}, status=404)\n\n if character.buy_weapon(weapon, quantity):\n return Response({\"Success\": \"The weapon was bought successfully\"}, status=200)\n else:\n return Response({\"Error\": \"Not enough money\"}, status=400)\n\n\n@api_view([\"POST\"])\ndef sell_weapon(request: HttpRequest):\n discord_id = request.data.get(\"discord_id\")\n character_name = request.data.get(\"character_name\")\n weapon_name = request.data.get(\"weapon_name\")\n quantity = request.data.get(\"quantity\", 1)\n\n try:\n character = Character.objects.get(name=character_name, discordID=discord_id)\n except Character.DoesNotExist:\n return Response({\"Error\": \"The character does not exist\"}, status=404)\n\n try:\n weapon = Weapon.objects.get(name=weapon_name)\n except Weapon.DoesNotExist:\n return Response({\"Error\": \"The weapon does not exist\"}, status=404)\n\n if character.sell_weapon(weapon, quantity):\n return Response({\"Success\": \"The weapon was sold successfully\"}, status=200)\n else:\n return Response({\"Error\": \"Not enough quantity of weapon to sell\"}, status=400)\n","repo_name":"jschmidt92/swade.io","sub_path":"api/weapon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70341084009","text":"# /usr/bin/python\n# -*- encoding:utf-8 -*-\n\n# hadoop退出安全模式: hadoop dfsadmin -safemode leave\n\"\"\"\nrequests.exceptions.ConnectionError: HTTPConnectionPool:\n在运行python程序的主机的hosts文件中加上主机名和ip的映射\n\"\"\"\n# pip3 install pyhdfs\nimport pyhdfs\n\n\ndef test():\n client = pyhdfs.HdfsClient(\"192.168.100.71,8088\", \"yyr\")\n hdfs_path = \"/\"\n # 新建目录\n client.mkdirs(\"/test-hadoop\", permission=777)\n client.rename(\"/test-hadoop\", \"/py-hadoop\")\n # 获取目录下文件\n # hdfs dfs -ls -R /\n print(client.listdir(hdfs_path))\n # 删除目录\n # hdfs dfs -rm -R /py-hadoop/test-hadoop\n # client.delete(\"/test-hadoop\")\n # 判断目录是否存在\n print(client.exists(\"/test-hadoop\"))\n # 将本地文件上传至hadoop\n # client.copy_from_local(r\"D:/Distributed System Deploy and Test/hadoop/prepare.md\", r\"/py-hadoop/prepare.md\")\n # 将hadoop文件down到本地\n # client.copy_to_local(r\"/py-hadoop/prepare.md\", r\"D:/prepare.md\")\n\n\nif __name__ == '__main__':\n test()\n","repo_name":"yuanyaru/hadoop","sub_path":"py-hadoop.py","file_name":"py-hadoop.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38505699618","text":"# !usr/bin/python\r\n# hacking my way through serial connections\r\n\r\n\r\nimport serial\r\nimport time\r\n\r\n\r\n# make serial object\r\nser = serial.Serial(\r\n port = 'COM1', #COM is on windows, linux is different\r\n baudrate=9600, #many different baudrates are available\r\n parity='N', #no idea\r\n stopbits=1,\r\n bytesize=8,\r\n timeout=8 #8 seconds seems to be a good timeout, may need to be increased\r\n )\r\n\r\ndef send_to_console(ser: serial.Serial, command: str, wait_time: float = 0.5):\r\n from time import sleep\r\n command_to_send = command + \"\\r\"\r\n ser.write(command_to_send.encode('utf-8'))\r\n sleep(wait_time)\r\n print(ser.read(ser.inWaiting()).decode('utf-8'), end=\"\")\r\n\r\ntime.sleep(1)\r\nwrite2 = send_to_console()\r\n# write3 = send_to_console(ser, ser.write(\"show vlan basic\"))\r\ntime.sleep(1)\r\ntime.sleep(1)\r\nsettings = ser.get_settings()\r\nname = ser.name\r\n\r\nprint(ser.isOpen())\r\n#print(write2)\r\n#print(write3)\r\nprint(name)","repo_name":"WiFiTG/MSPScripts","sub_path":"serial_connections.py","file_name":"serial_connections.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28363506577","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport csv\nfrom datetime import datetime\nimport json\nimport os\nimport re\nimport shutil\nimport subprocess\n\n\nparser = argparse.ArgumentParser(description=\"Make donation thank you notes.\")\nparser.add_argument(\n \"-j\", \"--join\", action=\"store_true\", help=\"combine output into one pdf\"\n)\nparser.add_argument(\n \"csv_path\",\n nargs=\"?\",\n default=\"donations.csv\",\n help=\"path to CSV file of donations (default: %(default)s)\",\n)\nargs = parser.parse_args()\n\ndirectory_name = \"notes\"\nif os.path.exists(directory_name):\n shutil.rmtree(directory_name)\nos.makedirs(directory_name)\n\nwith open(\"info.json\") as file:\n info = json.load(file)\n\ntoday = datetime.today()\nevent_date = datetime.strptime(info[\"Event date and time\"], \"%Y-%m-%d %H:%M\")\nassert today < event_date, \"Event date and time in info.json must be in the future\"\n\nwith open(os.path.join(\"support\", \"note-info.tex\"), \"w\") as file:\n file.write(\"\\\\newcommand\\\\SigmaStreet{{{}}}\\n\".format(info[\"Sigma address\"][0]))\n file.write(\n \"\\\\newcommand\\\\SigmaCityStateAndZIP{{{}}}\\n\".format(info[\"Sigma address\"][1])\n )\n file.write(\"\\\\newcommand\\\\eventName{{{}}}\\n\".format(info[\"Event name\"]))\n date_format = \"{date:%A}, {date:%B} {date.day}\"\n if event_date.year != today.year:\n date_format += \", {date.year}\"\n date_format += \" at {date.hour}\"\n if event_date.minute > 0:\n date_format += \":{date:%M}\"\n else:\n # U+2019 is the code point of ’, a right single quotation mark. Use the\n # code point instead of a literal ’ to avoid text encoding issues with\n # Python 3 on Windows.\n date_format += ' o\\\\char\"2019clock'\n file.write(\"\\\\newcommand\\\\eventDate{\" + date_format.format(date=event_date) + \"}\\n\")\n\ngenerated_files = []\n\nwith open(args.csv_path) as file:\n note_number = 0\n for row in csv.DictReader(file):\n with open(os.path.join(\"support\", \"donor-info.tex\"), \"w\") as file:\n file.write(\n \"\\\\newcommand\\\\donorDisplayName{{{}}}\\n\".format(\n row[\"Display name\"].strip()\n )\n )\n file.write(\n \"\\\\newcommand\\\\donorLastName{{{}}}\\n\".format(row[\"Last name\"].strip())\n )\n file.write(\n \"\\\\newcommand\\\\donorStreet{{{}}}\\n\".format(\n re.sub(\n r\"\\n|\\r\\n?\", r\"\\\\\\\\\", row[\"Street\"].strip(), flags=re.MULTILINE\n ).upper()\n )\n )\n file.write(\"\\\\newcommand\\\\donorCity{{{}}}\\n\".format(row[\"City\"].strip()))\n file.write(\"\\\\newcommand\\\\donorState{{{}}}\\n\".format(row[\"State\"].strip()))\n file.write(\"\\\\newcommand\\\\donorZIP{{{}}}\\n\".format(row[\"ZIP\"].strip()))\n file.write(\n \"\\\\newcommand\\\\donationAmount{{\\\\${}}}\\n\".format(\n re.sub(r\"\\.0*$\", \"\", \"{:,.2f}\".format(float(row[\"Amount\"])))\n )\n )\n file.write(\"\\\\newcommand\\\\donationMessage{\")\n if row[\"Anonymous\"] == \"TRUE\":\n file.write(\"As requested, your gift will remain anonymous. 
\")\n if row[\"Scholarships only\"] == \"TRUE\":\n file.write(\"We will use your gift to provide scholarships\")\n else:\n file.write(\n \"We will use your gift to provide the best possible Fraternal experience\"\n )\n file.write(\"}\\n\")\n\n note_number += 1\n subprocess.check_call(\n [\n \"lualatex\",\n \"-jobname=Note{}\".format(note_number),\n \"-interaction=batchmode\",\n \"-output-directory=\" + directory_name,\n \"Note.tex\",\n ]\n )\n\n generated_files.append(directory_name + \"/Note{}.pdf\".format(note_number))\n\n if args.join:\n subprocess.check_call(\n [\"pdfjoin\", \"--outfile\", directory_name + \"/Joined.pdf\"] + generated_files\n )\n","repo_name":"lcamichigan/donation-thank-you-note","sub_path":"make_notes.py","file_name":"make_notes.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41857727557","text":"import os\nimport IPython\n\nfrom dfbrowse.gui_debug import *\n\nclass IPythonCompleter(object):\n def __init__(self, hint):\n self.session = IPython.core.getipython.get_ipython()\n self.hint = hint\n def complete(self, prefix, completion_data):\n try:\n safe, prefix = prefix.rsplit(' ', 1)\n safe += ' '\n except:\n safe = ''\n try:\n print('trying to complete', prefix, safe, completion_data)\n completed, options = self.session.complete(prefix)\n self.hint('options: ' + ' '.join(str(opt) for opt in options))\n common_prefix = os.path.commonprefix(options)\n print('pre', prefix, 'comp', completed, 'cp', common_prefix)\n if len(common_prefix) > len(completed):\n return safe + common_prefix\n else:\n return completed\n except Exception as e:\n print('failed to complete')\n print(e)\n return prefix\n\ndef execute_ipython_command(string):\n session = IPython.core.getipython.get_ipython()\n session.run_cell(string, silent=True, store_history=False)\n session.execution_count += 1\n session.history_manager.store_inputs(session.execution_count,\n string)\n return True\n","repo_name":"petergaultney/df-browse","sub_path":"dfbrowse/ipython_utils.py","file_name":"ipython_utils.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18149751427","text":"h_line = input()\nh_arr = [int(y) for y in h_line.split()]\nword = input()\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\n\nwidth = len(word)\n\nmax_height = 0\n\nfor i in range(width):\n ind = alphabet.find(word[i])\n temp_height = h_arr[ind]\n if temp_height > max_height:\n max_height = temp_height\n\nlength = max_height\n\narea = int(max_height*width)\nprint(area)","repo_name":"Benson1198/31-Days-of-CP","sub_path":"Day 6/Designer PDF Viewer(Hackerrank).py","file_name":"Designer PDF Viewer(Hackerrank).py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70641271850","text":"import numpy as np\n\n\nprobs = np.array(\n [\n [\n 0.375,\n 0.625,\n ],\n [\n 0.8,\n 0.2,\n ],\n ]\n)\n\n\ndef update(preds, probs):\n x = preds[0] * probs[0][0] + preds[1] * probs[1][0]\n y = preds[0] * probs[0][1] + preds[1] * probs[1][1]\n return (x, y)\n\n\ndef main():\n n_chains = 5\n preds = (0, 1)\n for i in range(n_chains):\n preds = update(preds, probs)\n print(\" | \".join(map(lambda x: f\"{x:.8f}\", preds)))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"uatach/markov-chains-gagniuc","sub_path":"src/main04.py","file_name":"main04.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"37697669454","text":"from typing import Dict, Optional\n\nfrom catwalk.model import Model\nfrom catwalk.models.eleuther import EAIGPT, EAIT5\nfrom catwalk.models.gpt import GPTModel\nfrom catwalk.models.huggingface import HFAutoModel\nfrom catwalk.models.ia3 import IA3MetaICLModel\nfrom catwalk.models.language_model import DecoderOnlyLanguageModel\nfrom catwalk.models.metaicl import MetaICLModel\nfrom catwalk.models.promptsource import (\n PromptsourceDecoderOnlyRCModel,\n PromptsourceEncoderDecoderRCModel,\n)\nfrom catwalk.models.rank_classification import DecoderOnlyRCModel, EncoderDecoderRCModel\nfrom catwalk.models.soft_prompt import with_soft_prompt\nfrom catwalk.models.t5 import T5Model, T5ModelFromPretrained\n\n_ENCODER_DECODER_MODELS = {\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n \"bigscience/T0\",\n \"bigscience/T0p\",\n \"bigscience/T0pp\",\n \"bigscience/T0_single_prompt\",\n \"bigscience/T0_original_task_only\",\n \"bigscience/T0-3B\",\n \"google/mt5-small\",\n \"google/mt5-base\",\n \"google/mt5-large\",\n \"google/mt5-xl\",\n \"google/t5-small-lm-adapt\",\n \"google/t5-base-lm-adapt\",\n \"google/t5-large-lm-adapt\",\n \"google/t5-xl-lm-adapt\",\n \"google/t5-xxl-lm-adapt\",\n \"google/t5-v1_1-small\",\n \"google/t5-v1_1-base\",\n \"google/t5-v1_1-large\",\n \"google/t5-v1_1-xl\",\n \"google/t5-v1_1-xxl\",\n \"stas/t5-very-small-random\",\n \"bigscience/T0\",\n \"bigscience/T0p\",\n \"bigscience/T0pp\",\n \"bigscience/T0_single_prompt\",\n \"bigscience/T0_original_task_only\",\n \"bigscience/T0-3B\",\n \"ThomasNLG/CT0-11B\",\n}\n\n_DECODER_ONLY_MODELS = {\n \"sshleifer/tiny-gpt2\",\n \"gpt2\",\n \"gpt2-medium\",\n \"gpt2-large\",\n \"gpt2-xl\",\n \"bigscience/bloom-560m\",\n \"bigscience/bloom-1b1\",\n \"bigscience/bloom-1b7\",\n \"bigscience/bloom-3b\",\n \"bigscience/bloom-7b1\",\n \"bigscience/bloom\",\n \"facebook/opt-125m\",\n \"facebook/opt-350m\",\n \"facebook/opt-1.3b\",\n \"facebook/opt-2.7b\",\n \"facebook/opt-6.7b\",\n \"facebook/opt-13b\",\n \"facebook/opt-30b\",\n \"facebook/opt-66b\",\n \"EleutherAI/gpt-j-6B\",\n \"EleutherAI/gpt-neo-125M\",\n \"EleutherAI/gpt-neo-1.3B\",\n \"EleutherAI/gpt-neo-2.7B\",\n \"EleutherAI/gpt-neox-20b\",\n}\n\n\ndef _shorten_hf_name(hf_name: str) -> str:\n hf_name = hf_name.lower()\n parts = hf_name.split(\"/\", 1)\n return parts[-1]\n\n\nMODELS: Dict[str, Model] = {\n \"bert-base-uncased\": HFAutoModel(\"bert-base-uncased\"),\n \"bert-base-cased\": HFAutoModel(\"bert-base-cased\"),\n \"bert-large-uncased\": HFAutoModel(\"bert-large-uncased\"),\n \"bert-large-cased\": HFAutoModel(\"bert-large-cased\"),\n \"roberta-base\": HFAutoModel(\"roberta-base\"),\n \"roberta-large\": HFAutoModel(\"roberta-large\"),\n \"tiny-bert\": HFAutoModel(\"prajjwal1/bert-tiny\"),\n \"distilbert-base-cased-distilled-squad\": HFAutoModel(\n \"distilbert-base-cased-distilled-squad\"\n ),\n \"deberta-v3-base\": HFAutoModel(\"microsoft/deberta-v3-base\"),\n \"deberta-v3-small\": HFAutoModel(\"microsoft/deberta-v3-small\"),\n \"deberta-v3-large\": HFAutoModel(\"microsoft/deberta-v3-large\"),\n \"deberta-v2-xlarge\": HFAutoModel(\"microsoft/deberta-v2-xlarge\"),\n \"deberta-v2-xxlarge\": HFAutoModel(\"microsoft/deberta-v2-xxlarge\"),\n}\n\nfor hf_name in _ENCODER_DECODER_MODELS:\n name = _shorten_hf_name(hf_name)\n MODELS[name] = T5ModelFromPretrained(hf_name)\n MODELS[f\"eai::{name}\"] = EAIT5(hf_name)\n MODELS[f\"rc::{name}\"] = EncoderDecoderRCModel(hf_name)\n MODELS[f\"promptsource::{name}\"] = 
PromptsourceEncoderDecoderRCModel(hf_name)\n\n\ndef add_decoder_only_model(name, hf_name, **kwargs):\n MODELS[name] = GPTModel(hf_name, **kwargs)\n MODELS[f\"eai::{name}\"] = EAIGPT(hf_name, **kwargs)\n MODELS[f\"rc::{name}\"] = DecoderOnlyRCModel(hf_name, **kwargs)\n MODELS[f\"lm::{name}\"] = DecoderOnlyLanguageModel(hf_name, **kwargs)\n MODELS[f\"metaicl::{name}\"] = MetaICLModel(hf_name, **kwargs)\n MODELS[f\"promptsource::{name}\"] = PromptsourceDecoderOnlyRCModel(hf_name, **kwargs)\n\n\nfor hf_name in _DECODER_ONLY_MODELS:\n name = _shorten_hf_name(hf_name)\n add_decoder_only_model(name, hf_name)\n\n\nMODELS[\"rc::opt-175b\"] = DecoderOnlyRCModel(\n \"/net/nfs.cirrascale/allennlp/opt/opt-175b-huggingface\",\n pretrained_tokenizer_name_or_path=\"facebook/opt-66b\",\n)\n\n\ndef short_name_for_model_object(model: Model) -> Optional[str]:\n for model_name, model_object in MODELS.items():\n if id(model) == id(model_object):\n return model_name\n return None\n","repo_name":"allenai/catwalk","sub_path":"catwalk/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"53"}
+{"seq_id":"70860716969","text":"import pickle\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom sklearn.model_selection import train_test_split\n\nDATA_DIR = Path(__file__).parent / \"../data\"\nDEFAULT_TEST_SIZE = 0.1\nSEED = 42\n\n\ndef prepare_metadata_file(data: pd.DataFrame, test_size=DEFAULT_TEST_SIZE):\n train, val = train_test_split(data, test_size=test_size, random_state=SEED)\n train[\"stage\"] = \"train\"\n val[\"stage\"] = \"val\"\n meta = pd.concat((train, val), ignore_index=True)\n meta.to_csv(DATA_DIR / \"metadata.csv\", index=False)\n\n\ndef process_tags(tags):\n tags = list(map(int, tags.split(\",\")))\n return tags\n\n\ndef prepare_weights_for_wbce(data: pd.DataFrame):\n tags = data.tags.apply(process_tags)\n all_tags = tags.explode()\n tags_count = all_tags.value_counts()\n all_tags = all_tags.values.astype(np.int32)\n\n weights = torch.from_numpy(1 / tags_count.sort_index().values).float()\n torch.save(weights, DATA_DIR / \"bce_class_weights.pth\")\n\n\ndef prepare_weights_for_resample_loss(data: pd.DataFrame):\n tags = data.tags.apply(process_tags)\n all_tags = tags.explode()\n tags_cnt = all_tags.value_counts().sort_index()\n\n class_freq = tags_cnt.values\n neg_class_freq = len(data) - class_freq\n\n res = {\n \"class_freq\": class_freq,\n \"neg_class_freq\": neg_class_freq,\n }\n\n with open(DATA_DIR / \"class_freq.pkl\", \"wb\") as f:\n pickle.dump(res, f)\n\n\nif __name__ == \"__main__\":\n train_df = pd.read_csv(DATA_DIR / \"train.csv\")\n\n prepare_metadata_file(train_df)\n prepare_weights_for_wbce(train_df)\n prepare_weights_for_resample_loss(train_df)\n","repo_name":"traptrip/yandex_cup_ml_2023","sub_path":"recsys/utils/prepare_dataset.py","file_name":"prepare_dataset.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"71888528489","text":"# 프로그래머스 코딩 테스트 공부\n# https://school.programmers.co.kr/learn/courses/30/lessons/118668\ndef solution(alp, cop, problems):\n max_a = alp\n max_c = cop\n for problem in problems:\n max_a = max(problem[0], max_a)\n max_c = max(problem[1], max_c)\n\n dp = [[int(1e9)] * (max_c + 1) for _ in range(max_a + 1)]\n dp[alp][cop] = 0\n\n for a in range(alp, max_a + 1):\n for c in range(cop, max_c + 1):\n if a + 1 <= max_a:\n dp[a + 1][c] = min(dp[a][c] + 1, dp[a + 1][c])\n if c + 1 <= max_c:\n dp[a][c + 1] = min(dp[a][c] + 1, dp[a][c + 1])\n for problem in problems:\n if problem[0] <= a and problem[1] <= c:\n next_a = min(max_a, a + problem[2])\n next_c = min(max_c, c + problem[3])\n dp[next_a][next_c] = min(dp[next_a][next_c], dp[a][c] + problem[4])\n\n return dp[-1][-1]","repo_name":"do0134/solostudy","sub_path":"algorithm/4월/0427/2sol.py","file_name":"2sol.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"26645946919","text":"# TODO: Rewrite visualization functions to class\n\nfrom include import *\n\n\nclass EarlyStopping(object):\n def __init__(self, optimizer, monitor_metric=False, mode='minimize', patience=10):\n \"\"\"Callback. Early stops the training if validation loss doesn't improve \n after a given patience.\n Inputs:\n - monitor: (string) name of validation metric to track\n - mode: (string) E {'minimize', 'maximize'}\n - patience: (int) number of epochs to wait after we found no improvement\"\"\"\n\n self.optimizer = optimizer\n self.monitor_metric = monitor_metric\n self.patience = patience\n self.mode = mode\n \n self.best_metric = None\n self.num_bad_epochs = 0\n self.stop_train = False\n\n def update(self, batch_metrics):\n \"\"\"Each call this func is comparing current batch metric with last on in the\n history, if delta is less than min_delta than counter + 1. To break the training \n loop func should returns False.\"\"\"\n \n # if monitor_metric doesn't define that skip early stopping\n if self.monitor_metric == False: return False\n \n # unpack monitor metric from metrics\n metric = batch_metrics[self.monitor_metric] \n\n if self.best_metric is None: self.best_metric = metric\n \n # if 'minimize' than metric should lower that best otherwise start counter\n # if 'maximize' than metric should higher that best otherwise start counter\n compare_sign = operator.gt if self.mode=='minimize' else operator.lt\n if compare_sign(metric, self.best_metric):\n self.num_bad_epochs += 1\n print(f'- EarlyStopping counter: {self.num_bad_epochs} out of {self.patience}')\n if self.num_bad_epochs >= self.patience: self.stop_train = True\n else:\n self.best_metric = metric\n self.num_bad_epochs = 0 \n self.stop_train = False\n return self.stop_train\n\n\n# class EarlyStopping(object):\n# def __init__(self, optimizer, monitor_metric=False, minimize=True, patience=5, min_delta=0.1):\n# \"\"\"Old implementation. Callback. We have two options to define criteria for stopping. First is to track delta\n# between train and validation metric. When this defference encreasing that stop\n# training. Second option, implemented here, is to track monitor metric and if\n# this metric doesn't imporve patience epochs that stop training. We check abs difference\n# and if metric doesn't change (increse or decrease, don't care) than we stop training.\n# Means that we stop training when monitor metric on plato. But we can define how many epochs\n# we can wait (patience).\n\n# Inputs:\n# - monitor: (string) name of validation metric to track.\n# - patience: (int) number of epochs to wait after we found no improvement.\n# - min_delta: (int) minimum change in the monitored quantity to qualify as \n# an improvement, i.e. 
an absolute change of less than min_delta, will \n# count as no improvement.\"\"\"\n\n# self.optimizer = optimizer\n# self.monitor_metric = monitor_metric\n# self.patience = patience\n# self.min_delta = min_delta\n \n# self.metric_history = [0.001]\n# self.counter = 0\n\n# def update(self, batch_metrics):\n# \"\"\"Each call this func is comparing current batch metric with last on in the\n# history, if delta is less than min_delta than counter + 1.\"\"\"\n \n# # if monitor_metric doesn't define that skip early stopping\n# if self.monitor_metric == False:\n# return False\n \n# # unpack monitor metric from metrics\n# metric = batch_metrics[self.monitor_metric] \n\n# # calculate delta between current batch metric and last one in history\n# if np.abs(self.metric_history[-1] - metric) < self.min_delta: self.counter += 1\n# else: self.counter = 0 \n\n# # append curent batch metric to history\n# self.metric_history.append(metric)\n\n# stop_train = True if self.counter == self.patience else False\n# return stop_train\n\n \nclass RealTimeMonitoring(object):\n \"\"\"Class visualizing training in real time. Helps to monitor and \n debug nn while training. Takes history dict updates while trainins.\n Pay attention history.json doesn't exist yet. We are using history \n variable fro train.py script on the fly.\n \n Example:\n \n --init history = {}--\n monitor = RealTimeMonitoring(history)\n --update history--\n monitor.update(history)\n \n Inputs:\n - history: (dict) containing values of metrics for train and valid states.\"\"\"\n \n def __init__(self, history):\n \"\"\"Automaticaly build figure based on number of metrics. Each metric\n has it's own plot. Each plot contain two curves: train and valid.\"\"\"\n \n self.metrics = list(history['train'].keys())\n num_metrics = len(self.metrics)\n self.fig, self.axes = plt.subplots(1, num_metrics, figsize=(16, 6))\n # just for visual comford after first epoch\n for ax in (self.axes.flatten()):\n ax.set_ylabel(\"Y-label\", fontdict={'fontsize': 12}); ax.set_xlabel('X-label')\n ax.set_title(\"Title\", fontdict={'fontsize': 12})\n \n def _animate(self, history, ax, metric):\n \"\"\"Func update one axes with train and valid curves of one metric. 
\n So this is one plot of one metric but two curves: train and valid.\n Inputs:\n - history: (dict) updated hsitory with last batch metrics\n - ax: (axes) where we need to plot metric for train, valid\n - metric: (scting) metric which we need to plot.\"\"\"\n \n ax.clear()\n for index, (ax, state) in enumerate(zip([ax, ax], ['train', 'valid'])):\n data = history[state][ metric ]\n ax.plot(data, label=f'{state}')\n ax.set_ylabel(f'{metric}', fontdict={'fontsize': 12})\n ax.set_xlabel('Epochs'); \n ax.set_title(f'{metric}', fontdict={'fontsize': 12})\n ax.legend()\n \n def update(self, history):\n \"\"\"Call function to update the figure with plots.\"\"\"\n \n # number of plots (axes) is equal to number of metrics.\n for index, ax in enumerate(self.axes.flatten()):\n animation.FuncAnimation(self.fig, self._animate(history, ax, self.metrics[index]), interval=1000)\n ax.grid(); plt.pause(0.001)\n\nclass RunningAverage():\n \"\"\"A simple class that maintains the running average of a quantity.\n \n Example:\n \n loss_avg = RunningAverage()\n loss_avg.update(2)\n loss_avg.update(4)\n loss_avg() = 3 \"\"\"\n \n def __init__(self):\n self.steps = 0\n self.total = 0\n \n def update(self, val):\n self.total += val\n self.steps += 1\n \n def __call__(self):\n return self.total/float(self.steps)\n \n \nclass Params():\n \"\"\"Class that loads hyperparameters from a json file.\n Example:\n \n params = Params(json_path)\n print(params.learning_rate)\n params.learning_rate = 0.5 # change the value of learning_rate in params.\"\"\"\n\n def __init__(self, json_path):\n with open(json_path) as f:\n params = json.load(f) \n self.__dict__.update(params)\n\n def save(self, json_path):\n \"\"\"Save existing dict to json_path\"\"\"\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)\n \n def update(self, json_path):\n \"\"\"Loads parameters from json file\"\"\"\n with open(json_path) as f:\n params = json.load(f)\n self.__dict__.update(params)\n\n @property\n def dict(self):\n \"\"\"Here is a magic. It's allow to get access to dict keys just like as method.\n Gives dict-like access to Params instance by `params.dict['learning_rate']\"\"\"\n return self.__dict__\n \n \ndef set_logger(log_path):\n \"\"\"Set the logger to log info in terminal and file `log_path`.\n In general, it is useful to have a logger so that every output to the terminal is saved\n in a permanent file. 
Here we save it to `model_dir/train.log`.\n Example:\n \n logging.info(\"Starting training...\")\n \n Inputs:\n - log_path: (string) where to log.\"\"\"\n \n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n \n\ndef save_dict_to_json(d, json_path):\n \"\"\"Saves dict of floats in json file.\n Inputs:\n - d: (dict) of float-castable values (np.float, int, float, etc.)\n - json_path: (string) path to json file.\"\"\"\n \n with open(json_path, 'w') as f:\n # We need to convert the values to float for json (it doesn't accept np.array, np.float, )\n d = {k: float(v) for k, v in d.items()}\n json.dump(d, f, indent=4) \n \n \ndef save_checkpoint(state, is_best, checkpoint):\n \"\"\"Saves model and training parameters at checkpoint + 'last.pth.tar'. If is_best==True, also saves\n checkpoint + 'best.pth.tar'\n Args:\n state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict\n is_best: (bool) True if it is the best model seen till now\n checkpoint: (string) folder where parameters are to be saved\n \"\"\"\n filepath = os.path.join(checkpoint, 'last.pth.tar')\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.mkdir(checkpoint)\n# else:\n# print(\"Checkpoint Directory exists! \")\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))\n\n\ndef load_checkpoint(checkpoint, model, optimizer=None):\n \"\"\"Loads model parameters (state_dict) from file_path. If optimizer is provided, loads state_dict of\n optimizer assuming it is present in checkpoint.\n Args:\n checkpoint: (string) filename which needs to be loaded\n model: (torch.nn.Module) model for which the parameters are loaded\n optimizer: (torch.optim) optional: resume optimizer from checkpoint\n \"\"\"\n if not os.path.exists(checkpoint):\n raise(\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint\n \n\ndef random_split_df(df, valid_size=0.2):\n \"\"\"Функция шафлит df и берет первые по порядку \n для обучающей выборки и последние valid_size \n для валидационной выборки. Возвращает два датафрейма\n с новыми индексами по порядку, потому что для \n датасета они нужны по порядку. То есть делает \n reset_index().\n \n Inputs:\n - ds: dataframe.\n - valid_size: size of validation set.\n \n Returns:\n - train: the same structure dataframe.\n - valid: the same structure dataframe.\"\"\"\n \n valid_idx = int(len(df)*valid_size)\n \n # shuffle dataframe\n df = df.sample(frac=1).reset_index(drop=True)\n train = df.iloc[ :-valid_idx, :]\n valid = df.iloc[ -valid_idx:, :]\n \n # reset indexing in output dfs\n train.reset_index(drop=True, inplace=True)\n valid.reset_index(drop=True, inplace=True)\n return train, valid\n\n\ndef trainer(model, optimizer, scheduler, loss_function, \n epochs, training_data, validation_data, save_best=False):\n \"\"\"It's my own trainer function. 
\n This function is apropriate for image classification \n task. History write only epochs.\"\"\"\n \n # should be init automat\n history = { \n \"train\": {\"lr\": [], \"lr1\": [], \"lr2\": [], \"betas\": [], \"betas1\": [], \"betas2\": [], \"loss\": [], \"metric\": []},\n \"valid\": {\"loss\": [], \"lr\": [], \"metric\": []}}\n \n for epoch in range(epochs):\n training_loss = validation_loss = max_metric = 0.0\n alpha = 0.8\n \n # loop over training and validation for each epoch\n for dataset, training in [(training_data, True), (validation_data, False)]:\n correct = total = 0\n torch.set_grad_enabled(training)\n model.train(training)\n t = tqdm.tqdm_notebook(dataset)\n batch_len = len(t) - 1 \n \n # loop over dataset\n for batch_idx, (images, labels) in enumerate(t): \n images, labels = images.to(device), labels.to(device)\n model.zero_grad()\n scores = model(images)\n loss = loss_function(scores, labels)\n \n # calculate metrics\n predictions = torch.argmax(scores, dim=1)\n correct += (predictions == labels).sum().item()\n total += len(labels) \n accuracy = round(correct / total, 3)\n \n # do all stuff for train and validation\n if training:\n loss.backward()\n # calc moving average loss\n if batch_idx==0: training_loss = loss.item()\n training_loss_ = loss.item()\n training_loss = (alpha*training_loss) + ((1-alpha)*training_loss_)\n t.set_postfix(epoch=epoch, training_loss=training_loss,\n accuracy=accuracy, refresh=False)\n \n # track history\n history['train']['lr'].append(optimizer.param_groups[0]['lr'])\n# history['train']['lr1'].append(optimizer.param_groups[1]['lr'])\n# history['train']['lr2'].append(optimizer.param_groups[2]['lr'])\n \n history['train']['betas'].append(optimizer.param_groups[0]['betas'])\n# history['train']['betas1'].append(optimizer.param_groups[1]['betas'])\n# history['train']['betas2'].append(optimizer.param_groups[2]['betas'])\n \n history['train']['loss'].append(training_loss)\n history['train']['metric'].append(accuracy)\n # update weights\n optimizer.step()\n scheduler.step()\n else:\n validation_loss = loss.item() \n t.set_postfix(epoch=epoch, validation_loss=validation_loss,\n accuracy=accuracy, refresh=False)\n # track history\n history['valid']['loss'].append(validation_loss)\n history['valid']['metric'].append(accuracy)\n # save history at the epoch's end\n if save_best and batch_idx == batch_len and accuracy > max_metric:\n max_metric = accuracy\n torch.save(model.state_dict(), \"BEST_WEIGHTS.pt\") \n print(f\"Best model with accuracy: {round(accuracy, 3)} saved to BEST_WEIGHTS.pt\")\n return history\n\n\n# ============================================\n# Visualization utils\n# ============================================\n\n\n# TODO: Rewrite input args to apropriate vars\ndef check_augm_show_images(batch_size, dl, rows=3, cols=4):\n \"\"\"For simple classification task. To check the \n augmentation we are ploting images with augm.\"\"\"\n \n # check that we have enough images in batch\n assert rows*cols <= batch_size, \\\n \"You're trying to plot more images then batch_size.\"\n \n images, labels = next(iter(dl))\n fig, axes = plt.subplots(nrows=rows, ncols=cols, \n figsize=(14, 10))\n for i, ax in enumerate(axes.flatten()):\n ax.imshow(reverse_transform(images[i]))\n ax.axis('off')\n plt.tight_layout()\n \n \ndef show_lr_and_moms(history):\n \"\"\"Plot learning rate and momentum. We do this \n to check learning schem. 
How lr and momentum \n are changing.\"\"\"\n \n # TODO: add check that structure of history \n # variable is appropriate and has necesery keys.\n \n fig, axes = plt.subplots(1, 2, figsize=(14, 5))\n axes[0].plot(history['train']['lr']);\n axes[0].set_title('Learning rate changes over training');\n axes[0].set_xlabel('Iterations')\n axes[0].set_ylabel('Learning rate')\n \n axes[1].plot(history['train']['betas']);\n axes[1].set_title('Momentum changes over training');\n axes[1].set_xlabel('Iterations')\n axes[1].set_ylabel('Momentum')\n \n \ndef moving_average(sequence, alpha=0.999):\n \"\"\"This function takes input sequence and average.\"\"\"\n \n avg_loss = sequence[0]\n average = []\n for n, o in enumerate(sequence):\n avg_loss = (alpha*avg_loss) + ((1-alpha)*o)\n average.append(avg_loss)\n return average\n\n\ndef reverse_transform(image: torch.Tensor) -> np.ndarray:\n '''Convert tensor on which performed ToTensor, \n Normilization torch transformation back to numpy \n array appropriate for plotting with matplotlib.\n Does handle cuda and cpu tensors.\n \n Inputs:\n - image: torch.Tensor with shape [C x H x W].\n Normilized with imagenet_stats.\n \n Returns:\n - image_np: np.ndarray with size [H x W x C].\n Range of values between 0, 255. Appropriate\n for plotting with matplotlib.'''\n \n mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n image_np = image.cpu().numpy().transpose((1, 2, 0))\n image_np = 255 * np.clip( \n (std * image_np + mean), a_min=0, a_max=1 )\n return image_np.astype(np.uint8)\n\n\ndef show_train_results(history : dict):\n \"\"\"Plot training history.\n Inputs:\n - history: dict with keys: train, valid with \n results of training. \n - step: lenght of train and valid sets not \n equal so we plot train history with step \n gaps. Validation history plots fully.\"\"\"\n \n # We need this since train and val lengh is different\n step = len(history['train']['metric']) // len(history['valid']['metric'])\n \n fig, axes = plt.subplots(1, 2, figsize=(16, 5))\n axes[0].plot(history['train']['loss'][::step], label='train')\n axes[0].plot(history['valid']['loss'], label='valid')\n axes[0].set_xlabel('Iterations')\n axes[0].set_ylabel('Loss')\n axes[0].legend()\n \n axes[1].plot(history['train']['metric'][::step], label='train')\n axes[1].plot(history['valid']['metric'], label='valid')\n axes[1].set_xlabel('Iterations')\n axes[1].set_ylabel('Metric')\n axes[1].legend()\n plt.tight_layout()\n\n\n# ============================================\n# Segmentation\n# ============================================\ndef show_segment_batch(images: torch.Tensor, masks: torch.Tensor, \n how_many_plot: int, alpha=0.5): \n \"\"\"Function which plots images and masks batches\n returned from torch.Dataloader. Retruns nothing \n but plot images with masks. Always plots 3 columns. \n Rows only vary. Very important to set N in cmap\n which means number of colors. Also vmin and vmax\n in imshow of mask to set scale of integers in mask.\n \n Inputs:\n - images: torch.Tensor with shape [N x CH x H x W].\n Images normalized in accordance with imagenet, \n where N is a batch size and CH is number of \n channles, three for RGB images.\n - masks: torch.Tensor with shape [N x C x H x W]\n where C is a number of classes. Separate binary \n mask for each class.\n - how_many_plot: number of images to plot. 
Takes\n from the start, not random.\"\"\"\n \n # init some variables\n class_to_color = {\"background\": 'black',\n \"orange\": 'orange',\n \"banana\": 'khaki',\n \"tomato\": 'red',\n \"apple\": 'green',\n \"lemon\": 'yellow'}\n int_to_color = {0: \"black\", \n 1: \"orange\", \n 2: \"khaki\", \n 3: \"red\", \n 4: \"green\", \n 5: \"yellow\"}\n _nrows = 3\n cmap = colors.ListedColormap(list(int_to_color.values()), N=6)\n \n fig, axes = plt.subplots(nrows=int(np.ceil(how_many_plot/_nrows)), \n ncols=_nrows, figsize=(13, 9))\n for i, ax in enumerate(axes.flatten()):\n if i > how_many_plot-1: return \n ax.imshow( reverse_transform(images[i]) )\n uniq_classes_in_mask = sorted(masks[i].unique().cpu().numpy())\n mask = torch.squeeze(masks[i], dim=0).cpu().numpy().astype(np.uint8)\n ax.imshow( mask, vmin=0, vmax=5, alpha=alpha, cmap=cmap)\n ax.axis('off')\n plt.tight_layout()\n \n \n# ============================================\n# Fit One CYcle\n# ============================================\nclass MyScheduler(object):\n \"\"\"Updated base scheduler class to be appropriate to my OneCycle.\"\"\"\n def __init__(self, optimizer, last_epoch=-1):\n if not isinstance(optimizer, torch.optim.Optimizer):\n raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))\n self.optimizer = optimizer\n if last_epoch == -1:\n for group in optimizer.param_groups:\n group.setdefault('initial_lr', group['lr'])\n else:\n for i, group in enumerate(optimizer.param_groups):\n if 'initial_lr' not in group:\n raise KeyError(\"param 'initial_lr' is not specified \"\n \"in param_groups[{}] when resuming an optimizer\".format(i))\n self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))\n self.step(last_epoch + 1)\n self.last_epoch = last_epoch\n\n def state_dict(self):\n \"\"\"Returns the state of the scheduler as a :class:`dict`. It contains an entry for every \n variable in self.__dict__ which is not the optimizer.\"\"\"\n return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}\n\n def load_state_dict(self, state_dict):\n \"\"\"Loads the schedulers state.\n Inputs:\n - state_dict (dict): scheduler state. Should be an object returned from a call \n to :meth:`state_dict`.\"\"\"\n self.__dict__.update(state_dict)\n\n def get_lr(self):\n raise NotImplementedError\n\n def step(self, epoch=None):\n if epoch is None:\n epoch = self.last_epoch + 1\n self.last_epoch = epoch\n for param_group, lr, mom in zip(self.optimizer.param_groups, self.get_lr(), self.get_moms()):\n param_group['lr'] = lr\n param_group['betas'] = (mom, 0.99)\n \n \nclass OneCycle(MyScheduler):\n def __init__(self, epochs, optimizer, div_factor, pct_start, dl_len, last_epoch=-1):\n \"\"\"Суть в том, что эта схема обучения применяеться для всех\n эпох, а не для каждой по отдельности. То есть эта общая схем для всех\n эпох. То есть каждая эпоха не имеет свой отдельный цыкл! Один цыкл для \n всех эпох. Важно для понимания.\"\"\"\n # uppack all param groups\n lrs = [group['lr'] for group in optimizer.param_groups]\n self.last_epoch = last_epoch\n self.max_lr = lrs[0]\n self.eta_min = self.max_lr / div_factor\n self.num_iterations = dl_len * epochs\n self.upward_steps = int(self.num_iterations * pct_start)\n self.max_moms=0.95\n self.min_moms=0.85\n super(OneCycle, self).__init__(optimizer, last_epoch)\n \n def _calculate_schedule(self, lr):\n \"\"\"Calculate one cycle policy curves for learning rate and momentum for \n each param group in optimizer. 
Calculating curves once.\n \n TODO: Currently we calculate these curves each time we call step func.\n But we can calculate these curves only once and them call reqauired value\n instead of calculating them each time.\n \n Inputs:\n - lr: (float) maximum learning rate in cycle. We defined it using learning\n rate search algorithm.\"\"\"\n \n # calculate one cycle for learning rate\n upward_lr = np.linspace(start=self.eta_min, stop=lr, num=self.upward_steps)\n downward_lr = [(self.eta_min + (lr - self.eta_min) * (1 + math.cos((math.pi*o)/self.num_iterations)) / 2) \n for o in np.linspace(start=0, stop=self.num_iterations, num=self.num_iterations-self.upward_steps)]\n \n # calculate one cycle for momentum\n upward_moms = np.linspace(start=self.max_moms, stop=self.min_moms, num=self.upward_steps)\n downward_moms = [(self.min_moms + (self.max_moms - self.min_moms) * (1 + math.cos((math.pi*o)/self.num_iterations)) / 2) \n for o in np.linspace(start=self.num_iterations, stop=0, num=self.num_iterations-self.upward_steps)]\n \n return [np.concatenate([upward_lr, downward_lr]), np.concatenate([upward_moms, downward_moms])]\n \n def get_lr(self):\n \"\"\"As said before in the above func we call self.last_epoch value to get required last value.\n The same issue with calculating momentums.\"\"\"\n lr = [self._calculate_schedule(base_lr)[0][self.last_epoch] for base_lr in self.base_lrs]\n return lr\n \n def get_moms(self):\n # returns updated mom for each param group\n moms = [self._calculate_schedule(base_lr)[1][self.last_epoch] for base_lr in self.base_lrs]\n return moms\n \n \n \ndef split_model_idx(model, idxs):\n \"\"\"Split `model` according to the indexes in `idxs`.\n \n Example: \n \n len(model)=10; idxs=[5, 8]\n [5, 8] -> [0, 5, 8, 10] -> [0-5, 5-8, 8-10]\n \n remember last value in indexing doesn't count.\"\"\"\n \n assert type(idxs)==list, 'idxs should be list of integers.'\n layers = list(model.children())\n if idxs[0] != 0: idxs = [0] + idxs\n if idxs[-1] != len(layers): idxs.append(len(layers))\n return [nn.Sequential(*layers[i:j]) for i,j in zip(idxs[:-1],idxs[1:])]\n\n\ndef create_opt(optimizer, group_layers, lrs, wd):\n \"\"\"Assign lrs to group layer.\"\"\"\n assert len(group_layers)==len(lrs), 'Len of groups should be equal to len of lrs.'\n return optimizer([{'params': x.parameters(), 'lr': lr} for x, lr in zip(group_layers, lrs)], weight_decay=wd)\n\n\ndef get_optim(model, idx_to_split_model, optimizer, lrs, wd):\n \"\"\"Combine two above functions.\n Inpust:\n - model: (torch.nn.Module)\n - idx_to_split_model: (list) integer in list used to split model to group layers.\n - lrs: (list) list of learnings rates for each group layer, e.g. [0.1, 0.001, 0.0001].\"\"\"\n group_layers = split_model_idx(model, idx_to_split_model)\n optimizer = create_opt(optimizer, group_layers, lrs, wd)\n return optimizer\n\n\ndef save_model_history_graphics(history, model_dir):\n \"\"\"Func is used to show single model training results. \n Could be used to track overfitting or underfiting, \n learning rate inconsistent or else. Num plots equal \n to num_metrics. 
TODO: assert history.\n Inputs:\n - history: (dict) containing history of train.\n - model_dir: (string) model dir.\"\"\"\n \n # unpach list of metrics from dict\n metrics = list(history['train'].keys())\n num_metircs = len(metrics)\n \n fig, axes = plt.subplots(1, num_metircs, figsize=(12, 6))\n for index, ax in enumerate(axes.flatten()):\n ax.plot(history['train'][ metrics[index] ], label='train')\n ax.plot(history['valid'][ metrics[index] ], label='valid')\n ax.set_ylabel(f'{metrics[index]}', fontdict={'fontsize': 14})\n ax.set_xlabel('Epochs'); ax.grid(); ax.legend()\n ax.set_title(f'{metrics[index]}', fontdict={'fontsize': 14})\n plt.tight_layout()\n \n # save figure on the dist\n figure_file_name = f'{model_dir}/training_history.png'\n plt.savefig(figure_file_name) \n \n \ndef find_lr(net, dataloader, optimizer, loss_function, init_value=1e-7, final_value=10., beta=0.95):\n \"\"\"https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html#how-do-you-find-a-good-learning-rate.\n We plot exponentially weighted average loss. To stop a algorighm \n when loss exploted we define formula. We take 0 index from params \n group for searching.\n Fast ai do the next for tune-tuning. Find lr for train classifier.\n Than find another lr for first group and for classifier use lr from \n first stage / 10.\n \n Example: \n \n log_lrs, losses = find_lr(model, dls['train'], optimizer, loss_function, beta=0.95)\n plot_lr_find(log_lrs, losses)\n \n Inputs:\n - beta: (float) used for loss smoothing.\"\"\"\n \n num = len(dataloader)-1 # number of iterations\n lr = init_value # initilize learning rate\n mult = (final_value / init_value) ** (1/num) # multiplication factor to increase lr\n optimizer.param_groups[0]['lr'] = lr # take first param group\n avg_loss, best_loss = 0., 0.\n losses, log_lrs = [], []\n \n for batch_num, (inputs, labels) in tqdm.tqdm_notebook(enumerate(dataloader, start=1)):\n inputs, labels = inputs.to(device), labels.to(device)\n \n # get scores\n optimizer.zero_grad()\n outputs = net(inputs)\n \n # calculate and smooth loss\n loss = loss_function(outputs, labels)\n avg_loss = beta * avg_loss + (1-beta) *loss.item()\n smoothed_loss = avg_loss / (1 - beta**batch_num)\n \n # stop if the loss is exploding\n if batch_num > 1 and smoothed_loss > 4 * best_loss:\n return log_lrs, losses\n \n # record the best loss, why we need this?\n if smoothed_loss < best_loss or batch_num==1:\n best_loss = smoothed_loss\n \n # store the values\n losses.append(smoothed_loss)\n log_lrs.append(np.log(lr))\n \n # do the optimizer step\n loss.backward()\n optimizer.step()\n \n # increase the lr for the next step\n lr *= mult\n optimizer.param_groups[0]['lr'] = lr\n \n return log_lrs, losses\n\n\ndef plot_lr_find(log_lrs, losses):\n _, ax = plt.subplots(1,1)\n ax.plot(np.exp(log_lrs)[10:-4],losses[10:-4]);\n ax.set_xscale('log');\n ax.set_xlabel('learning rate');\n ax.set_ylabel('loss');\n \n \n \n\n","repo_name":"linkyeu/code_example","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":31043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13549219348","text":"import numpy as np\nimport datetime\nimport pandas as pd\nimport keras\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom createDataset import parseJson, create_dataset\nfrom pymongo import MongoClient\nimport os\n\npath = os.path.dirname(os.path.realpath(__file__))\n\n\ndef train_model(lang,total=False,output=1,look_back=7):\n language = lang\n trainSeq, testSeq, dataset = parseJson(language,total)\n scaler = MinMaxScaler(feature_range=(0, 1))\n dataSequence = scaler.fit_transform(dataset)\n inputX, inputY = create_dataset(dataSequence, look_back=look_back)\n\n inputX = np.reshape(inputX, (inputX.shape[0], 1, inputX.shape[1]))\n # inverse=scaler.inverse_transform(dataSequence)\n\n model = Sequential()\n model.add(LSTM(10, input_shape=(1, look_back)))\n model.add(Dense(output))\n model.compile(loss='mean_squared_error', optimizer='adam')\n model.fit(inputX, inputY, epochs=int(float(dataset.shape[0]) / 10), batch_size=100, verbose=10)\n model.save(path + '/models/%smodel.h5' % language)\n del model\n\n\ndef pred(timestamp, lang):\n # time_value[timestamp,repo_number]\n time_value = pd.read_csv('datas/%sdata.csv' % lang, encoding='gb18030')\n # bottom of time_value\n time_last = time_value.loc[time_value.shape[0] - 1]['timestamp']\n # input of model\n datas = np.array(time_value.loc[time_value.shape[0] - 7:time_value.shape[0] - 1]['repo_number'])\n datas = np.reshape(datas, [7, 1])\n # normalize input\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n datas = scaler.fit_transform(datas)\n dataSequence = np.reshape(datas, (1, 1, 7))\n # load model\n model = load_model(path + '/models/%smodel.h5' % lang)\n # predict\n pre = scaler.inverse_transform(model.predict(dataSequence))\n # write in csv\n time_last = datetime.datetime.strptime(time_last, \"%Y-%m-%d %H:%M:%S\")\n time_new = time_last + datetime.timedelta(days=1)\n time_value.append([time_new, pre])\n time_value.to_csv(path + 'datas/%sdata.csv' % lang, encoding='gb18030')\n\n # 清理内存\n del time_value\n del model\n\n if ((timestamp - time_last).days == 1):\n tempSeq = None\n\n return 1\n else:\n return predTill(timestamp + datetime.timedelta(days=-1), lang)\n\n\ndef predTill(timestamp, lang):\n # check constraint\n lang_list=['C#','C++','CSS','HTML','Java','JavaScript','PHP','Python','Ruby','TypeScript','Perl','C']\n if lang not in lang_list:\n return \"[ERROR]: No model found for %s\"%lang\n # load data\n time_value = pd.read_csv(path + '/datas/%sdata.csv' % lang, encoding='gb18030')\n time_value = pd.DataFrame(time_value, columns=['timestamp', 'repo_number'])\n time_value = np.array(time_value)\n\n # the date of latest data in csv file\n time_last = time_value[time_value.shape[0] - 1][0]\n time_last = datetime.datetime.strptime(time_last, \"%Y-%m-%d %H:%M:%S\")\n\n if (timestamp <= time_last):\n return \"[ERROR]: Please select a date ahead of today\"\n\n # clear old model and load new model\n keras.backend.clear_session()\n model = load_model(path + '/models/%smodel.h5' % lang)\n # output\n pred = []\n\n while ((timestamp - time_last).days != 0):\n # get last 7 days' data\n datas = np.array(time_value[time_value.shape[0] - 7:time_value.shape[0], 1])\n datas = np.reshape(datas, [7, 1])\n scaler = MinMaxScaler(feature_range=(0, 1))\n datas = scaler.fit_transform(datas)\n dataSequence = np.reshape(datas, (1, 1, 7))\n pre=model.predict(dataSequence)\n pre = scaler.inverse_transform(pre)\n print(pre[0][0])\n\n time_last = 
time_last + datetime.timedelta(days=1)\n print(time_value.shape)\n update = np.reshape(['pre', pre[0][0]], [1, 2])\n time_value = np.append(time_value, update, axis=0)\n print(time_value.shape)\n # output\n # pred.append(pre)\n pred = np.append(pred, int(pre))\n\n del model\n del time_value\n\n return pred\n\ndef predDays(days, lang):\n # check constraint\n lang_list=['C#','C++','CSS','HTML','Java','JavaScript','PHP','Python','Ruby','TypeScript','Perl','C']\n if lang not in lang_list:\n return lang_list\n # load data\n # time_value = pd.read_csv(path + '/datas/%sdata.csv' % lang, encoding='gb18030')\n # time_value = pd.DataFrame(time_value, columns=['timestamp', 'repo_number'])\n #\n # time_value = np.array(time_value)\n\n connection = MongoClient('0.0.0.0', 27017)\n\n db = connection.GHUserAnalyse\n\n set = db.Top10Lang\n\n results = set.find({'language': lang})\n\n dataset = []\n for result in results:\n dataset.append([result['timestamp'], result['number']])\n\n dataset = np.array(dataset)\n\n connection.close()\n\n time_value = dataset\n\n\n\n # clear old model and load new model\n keras.backend.clear_session()\n model = load_model(path + '/models/%smodel.h5' % lang)\n # output\n pred = []\n\n while (days!= 0):\n # get last 7 days' data\n datas = np.array(time_value[time_value.shape[0] - 7:time_value.shape[0], 1])\n datas = np.reshape(datas, [7, 1])\n # scale data\n scaler = MinMaxScaler(feature_range=(0, 1))\n datas = scaler.fit_transform(datas)\n dataSequence = np.reshape(datas, (1, 1, 7))\n # predict\n pre=model.predict(dataSequence)\n # inverse data\n pre = scaler.inverse_transform(pre)\n\n update = np.reshape(['pre', pre[0][0]], [1, 2])\n time_value = np.append(time_value, update, axis=0)\n\n pred = np.append(pred, int(pre))\n days-=1\n\n del model\n del time_value\n\n return pred\n\ndef predTotal(days):\n# load data\n\n connection = MongoClient('0.0.0.0', 27017)\n\n db = connection.GHUserAnalyse\n\n set = db.TotalRepoAmount\n\n results = set.find({})\n\n\n dataset = []\n for result in results:\n dataset.append([result['timestamp'], result['amount']])\n\n dataset = np.array(dataset)\n print(dataset)\n connection.close()\n\n\n time_value = dataset\n keras.backend.clear_session()\n model = load_model(path + '/models/Totalmodel.h5')\n pred = []\n\n while (days!= 0):\n # get last 7 days' data\n datas = np.array(time_value[time_value.shape[0] - 7:time_value.shape[0], 1])\n datas = np.reshape(datas, [7, 1])\n # scale data\n scaler = MinMaxScaler(feature_range=(0, 1))\n datas = scaler.fit_transform(datas)\n dataSequence = np.reshape(datas, (1, 1, 7))\n # predict\n pre=model.predict(dataSequence)\n # inverse data\n pre = scaler.inverse_transform(pre)\n\n update = np.reshape(['pre', pre[0][0]], [1, 2])\n time_value = np.append(time_value, update, axis=0)\n\n pred = np.append(pred, int(pre))\n days-=1\n\n del model\n del time_value\n\n return pred\n\n\n# a=predTill(datetime.datetime(2018,11,23),'JavaScript')\n# print(a)\n# train_model('JavaScript')\n# train_model('Perl')\n# train_model(lang=\"Total\",total=True)\n# train_model(\"Total\",total=True)\n# a=predTotal(12)\n# print(a)","repo_name":"cheng-kun/github-user-behavior-analysis","sub_path":"backend/statistic/trendModel/predService.py","file_name":"predService.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"24695715745","text":"\n############################# Imports ###############################\n\nimport os\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nimport random\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom pathlib import Path\n\nfrom model import G, D\nfrom dataset import Dataset\n\nfrom IPython import embed\n\n############################# Declarations ##########################\n\ndef save_intermediate_results(save_pth, data):\n\n y_axis = list(np.array(data.view(-1)))\n x_axis = [i for i in range(len(y_axis))]\n\n plt.clf()\n plt.plot(x_axis, y_axis, '.-', label='line 1', linewidth=2)\n Path('results').mkdir(parents=True, exist_ok=True)\n plt.savefig(save_pth)\n\ndef train(config):\n\n ################################### Model Initalization ####################################\n\n dataset = Dataset(config['dir_csv']) # Class declaraction --> Runs ___init___ with Dataset\n dataloader = DataLoader(dataset, batch_size=1) # Built-in class within Pytorch \n \n '''\n Save Ground Truth Plots\n '''\n if True:\n\n Path('ground_truths').mkdir(parents=True, exist_ok=True)\n for data in dataloader:\n \n real_data = data['real_data']\n basename = data['basename'][0]\n\n save_intermediate_results(os.path.join('ground_truths', basename + '.jpg'), real_data)\n \n gen = G().to(config['device'])\n dsc = D().to(config['device'])\n \n optimizer_G = torch.optim.Adam(gen.parameters(), lr=config['lr'])\n optimizer_D = torch.optim.Adam(dsc.parameters(), lr=config['lr'])\n\n real_label = torch.tensor(1.0).view(1, -1).to(config['device']) # Tensor of shape (1, 1)\n fake_label = torch.tensor(0.0).view(1, -1).to(config['device']) # Tensor of shape (1, 1)\n\n criterion = nn.BCELoss() # Binary Cross Entropy Loss\n\n fixed_noise = torch.rand((1, 100)).to(config['device'])\n\n for epoch in range(config['n_epoch']):\n \n for data in dataloader:\n\n real_data = data['real_data'].to(config['device'])\n\n ##################### Optimize for Generator ##########################\n \n optimizer_G.zero_grad()\n \n fake_data = gen(fixed_noise) # (1, 100) -> (1, 1, 800)\n pred = dsc(fake_data) # (1, 1, 800) -> (1, 1)\n G_loss = criterion(pred, real_label) # Train the generator to fool the discriminator\n \n '''\n Optimize\n '''\n G_loss.backward()\n optimizer_G.step()\n \n ##################### Optimize for Discriminator ######################\n \n optimizer_D.zero_grad()\n\n '''\n Real Input\n '''\n pred = dsc(real_data) # (1, 1, 800) -> (1, 1)\n D_loss_real = criterion(pred, real_label) # Train the discriminator to distinguish between real and fake data\n\n '''\n Fake Input\n '''\n pred = dsc(fake_data.detach()) # (1, 1, 800) -> (1, 1)\n D_loss_fake = criterion(pred, fake_label) # Train the discriminator to distinguish between real and fake data\n\n '''\n Optimize\n '''\n D_loss_total = (D_loss_real + D_loss_fake) / 2\n D_loss_total.backward()\n optimizer_D.step()\n \n if (((epoch + 1) % config['val_epoch']) == 0):\n\n Path('results').mkdir(parents=True, exist_ok=True)\n save_intermediate_results(os.path.join('results', 'epoch_%d.jpg'%(epoch + 1)), fake_data.detach().cpu())\n\n print('[Epoch] %d / %d'%(epoch + 1, config['n_epoch']), end='\\r')\n\nif __name__=='__main__':\n\n '''\n Fixed Seeds for Consistency \n '''\n torch.manual_seed(0)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(0)\n random.seed(0)\n\n '''\n Configs\n '''\n config = {\n\n 'device' : torch.device('cuda') if 
(torch.cuda.is_available()) else torch.device('cpu'), # Device to train with\n 'n_epoch' : 400, # Number of total epochs to run\n 'lr' : 0.0001, # Learning Rate\n 'dir_csv' : 'real_data', # Directory of samples\n 'val_epoch' : 20 # Interval to view results\n\n }\n\n '''\n Enter Main Function\n '''\n train(config)\n\n\n","repo_name":"Tom2096/Deep-Learning-Generating-Data-For-Medical-Research","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20733324226","text":"# help(dict(a=1).items())\n# print(*dir(dict(a=1).items()), sep='\\n')\n# print(type())\n\nfrom pprint import pprint as pp\nimport sys\nimport builtins\n\n\"\"\"\n\nВам дано описание наследования классов в следующем формате.\n<имя класса 1> : <имя класса 2> <имя класса 3> ... <имя класса k>\nЭто означает, что класс 1 отнаследован от класса 2, класса 3, и т. д.\n\nИли эквивалентно записи:\n\nclass Class1(Class2, Class3 ... ClassK):\n pass\nКласс A является прямым предком класса B, если B отнаследован от A:\n\n\nclass B(A):\n pass\n\n\nКласс A является предком класса B, если\nA = B;\nA - прямой предок B\nсуществует такой класс C, что C - прямой предок B и A - предок C\n\nНапример:\nclass B(A):\n pass\n\nclass C(B):\n pass\n\n# A -- предок С\n\n\nВам необходимо отвечать на запросы, является ли один класс предком другого класса\n\nВажное примечание:\nСоздавать классы не требуется.\nМы просим вас промоделировать этот процесс, и понять существует ли путь от одного класса до другого.\nФормат входных данных\nВ первой строке входных данных содержится целое число n - число классов.\n\nВ следующих n строках содержится описание наследования классов. \nВ i-й строке указано от каких классов наследуется i-й класс. \nОбратите внимание, что класс может ни от кого не наследоваться. \nГарантируется, что класс не наследуется сам от себя (прямо или косвенно),\nчто класс не наследуется явно от одного класса более одного раза.\n\nВ следующей строке содержится число q - количество запросов.\n\nВ следующих q строках содержится описание запросов в формате <имя класса 1> <имя класса 2>.\nИмя класса – строка, состоящая из символов латинского алфавита, длины не более 50.\n\nФормат выходных данных\nДля каждого запроса выведите в отдельной строке слово \"Yes\", если класс 1 является предком класса 2, и \"No\", если не является.\n\nSample Input:\n\n4\nA\nB наследуется от A\nC : A\nD : B C\n4\nA предок B?\nB D\nC D\nD A\nSample Output:\n\nYes\nYes\nYes\nNo\n\n\n\nchild parents\nB : [A, ]\nC: [A]\nD: [B, C]\n\n\nclassK classD\nclassH classG\n\"\"\"\n# import sys\n# sys.stdin = open(\"test.txt\", \"r\")\n\n# здесь три решения, каждое в своей функции :\n# dfs_paths\n# show_relation\n# show_relation2\n# работает только решение dfs_paths\nhierarchy = {}\nanswers = []\nrelations = {('parent', 'index'): None}\ncounter = 0\n# в этой функции ошибок нет\ndef put_in_hierarchy(enter):\n if ':' in enter:\n child, parents = enter.split(':')\n child = child.strip()\n parents = set(map(lambda elem: elem.strip(), parents.strip().split()))\n hierarchy[child] = parents\n else:\n cls = enter.strip()\n hierarchy[cls] = set()\n\ndef put_in_relations(enter):\n if ':' in enter:\n child, parents = enter.split(':')\n child = child.strip()\n parents = list(map(lambda elem: elem.strip(), parents.strip().split()))\n check_key = lambda item, elem: elem == item[0]\n global counter\n for cls in parents:\n relations[(cls, counter)] = child\n counter += 1\n\ndef show_relations2(child, ancestor):\n for i in range(counter + 1):\n if relations.get((ancestor, i)) == child:\n return True\n return False\n\n\n\ndef dfs_paths(graph, child, ancestor):\n stack = [(child, [child, ])]\n while stack:\n (vertex, path) = stack.pop()\n if vertex in graph.keys():\n for next_ in graph[vertex] - set(path):\n if next_ == ancestor:\n yield path + [next_]\n else:\n stack.append((next_, path + [next_, ]))\n\n\n\n\ndef show_relation(ancestor, child):\n if child in hierarchy:\n if ancestor in hierarchy[child]:\n return True\n for cls in 
hierarchy[child]:\n show_relation(child, cls)\n return False\n\n\n\n\n\ncount_enters = int(input())\nfor i in range(count_enters):\n enter = input()\n put_in_hierarchy(enter)\n\ncounter_outputs = int(input())\nfor i in range(counter_outputs):\n enter = input()\n\n ancestor, child = enter.split()\n # answers.append(show_relation(ancestor, child))\n if ancestor == child:\n answers.append('Yes')\n else:\n data = list(dfs_paths(hierarchy, child, ancestor))\n # data = show_relations2(child, ancestor)\n if data:\n answers.append('Yes')\n else:\n answers.append('No')\n\n\nprint(*answers, sep='\\n')\n\n\n\n\n\n\n\n# првоверка put_in_hierarchy. Она работает правильно.\n# data_for_check = {'classE': ['classD', 'classF', 'classK', 'classL'], 'classH': ['classL'], 'classK': ['classH', 'classL'], 'classG': ['classF'], 'classC': ['classE', 'classD', 'classH', 'classK', 'classL'], 'classB': ['classC', 'classE', 'classG', 'classH', 'classK', 'classL'], 'classA': ['classB', 'classC', 'classD', 'classG', 'classH'], 'classF': ['classK'], 'classD': ['classG', 'classH'], 'classL': []}\n#\npp(hierarchy)\n# print()\n# pp(data_for_check)\n#\n# pp(hierarchy == data_for_check)\n\n\n\n\n\n\n# проверка dfs_path\nyes_no = \"\"\"\nYes\nYes\nYes\nYes\nYes\nYes\nNo\nNo\nNo\nNo\nYes\nNo\nNo\nYes\nNo\nYes\nYes\nYes\nNo\nYes\nNo\nNo\nYes\nYes\nNo\nNo\nNo\nYes\nYes\nNo\nYes\nNo\nNo\nNo\nYes\nYes\nYes\nNo\n\"\"\"\nyes_no = set(enumerate(yes_no.split('\\n')[1:-1], start=1))\nlength1 = len(yes_no)\nanswers = set(enumerate(answers, start=1))\n\noutput = list(answers.difference(yes_no))\noutput.sort(key=lambda x: x[0])\n# yes_no = list(yes_no)\n# yes_no.sort(key=lambda x: x[0])\ncounter = 0\nfor i, elem in enumerate(output, start=1):\n counter += 1\n print(f'{elem[0]} - {elem[-1]}')\nprint()\nprint(counter)","repo_name":"Kirill67tyar/tree-of-knowledge","sub_path":"Python/python_base_and_use_stepik/1_/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"40544913341","text":"# bot.py\nimport asyncio\nimport discord\nimport os\nimport uvicorn\n\nfrom cogs import test, user\nfrom models.reminder import Reminder\nfrom services.message_service import MessageService\nfrom services.guild_service import GuildService\n\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nfrom fastapi import FastAPI\nfrom typing import List\n\nload_dotenv()\nTOKEN = os.environ.get(\"PRODUCTION_TOKEN\")\nPREFIX = os.environ.get(\"PRODUCTION_PREFIX\")\n\nif TOKEN == None:\n TOKEN = os.getenv(\"DISCORD_TOKEN\")\n PREFIX = os.getenv(\"COMMAND_PREFIX\")\n\napp = FastAPI()\n\n@app.on_event('startup')\nasync def on_startup():\n bot = commands.Bot(command_prefix=PREFIX)\n cogs = [test, user]\n for cog in cogs:\n cog.setup(bot)\n asyncio.create_task(bot.start(TOKEN))\n\n @bot.event\n async def on_ready():\n GuildService.bot = bot\n await bot.change_presence(activity=discord.Game(name=\"with You\"))\n print(f'{bot.user} has connected to Discord!')\n\n@app.post('/reminder')\nasync def reminder(reminders: List[Reminder]):\n asyncio.create_task(MessageService.send_reminder(reminders))\n return {'data': 'OK'}\n\n@app.get('/check')\nasync def check():\n return {'data': 'OK'}\n\nif __name__ == '__main__':\n uvicorn.run(app, host='0.0.0.0', port=5000)\n","repo_name":"watermelonreborn/jadwalin-discord-bot","sub_path":"src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4155965199","text":"from kivy.app import App\r\nfrom kivy.lang import Builder\r\nfrom kivy.uix.widget import Widget\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.properties import StringProperty\r\nfrom kivy.uix.textinput import TextInput\r\n\r\nBuilder.load_string('''\r\n:\r\n size_hint_y: None\r\n text_size: self.width, None\r\n height: self.texture_size[1]\r\n group: 'test'\r\n canvas.before:\r\n Color:\r\n rgba: .7, .7, .7, 1\r\n Rectangle:\r\n pos: self.pos\r\n size: self.size\r\n\r\n:\r\n id: Img_Data2\r\n\tBoxLayout:\r\n\t\torientation: 'vertical'\r\n\t\tsize: root.size\r\n\t\tpos: root.pos\r\n\t\tLabel:\r\n\t\t\ttext: 'Description'\r\n\t\t\tsize_hint_y: None\r\n\t\t\theight: 30\r\n\t\t\tbold: True\r\n StretchDataBox:\r\n text: Img_Data2.c_description\r\n DescDump:\r\n on_text_validate: Img_Data2.changeDescription(self.text)\r\n Label:\r\n\r\n:\r\n\torientation: 'horizontal'\r\n Button:\r\n text: 'h1'\r\n group: 'test'\r\n ImgData2:\r\n''')\r\n\r\nclass StretchDataBox(Label):\r\n def __init__(self, **kwargs):\r\n super(StretchDataBox, self).__init__(**kwargs)\r\n\r\n def changeContents(self, value):\r\n print(\"StretchDataBox.changeContents()\", value)\r\n\r\nclass DescBox(Label):\r\n def __init__(self, **kwargs):\r\n super(DescBox, self).__init__(**kwargs)\r\n\r\nclass DescDump(TextInput):\r\n def __init__(self, **kwargs):\r\n super(DescDump, self).__init__(**kwargs)\r\n #self.size_hint = (None, None)\r\n #self.size = (100, 30)\r\n #self.multiline = True\r\n self.multiline = False\r\n\r\n def showUs(self):\r\n print(self.text)\r\n self.text = \"\"\r\n\r\n #def on_text_validate(self):\r\n # print(\"DescDump.on_text_validate(): \")\r\n # print(self.Img_Data2)\r\n\r\n\r\n\r\n\r\nclass ImgData2(Widget):\r\n c_description = StringProperty('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Proin vitae turpis ornare urna elementum pharetra non et tortor. Curabitur semper mattis viverra. Pellentesque et lobortis purus, eu ultricies est. Nulla varius ac dolor quis mattis. Pellentesque vel accumsan tellus. Donec a nunc urna. Nulla convallis dignissim leo, tempor sagittis orci sollicitudin aliquet. Duis efficitur ex vel auctor ultricies. Etiam feugiat hendrerit mauris suscipit gravida. Quisque lobortis vitae ligula eget tristique. Nullam a nulla id enim finibus elementum eu sit amet elit.')\r\n def __init__(self, **kwargs):\r\n super(ImgData2, self).__init__(**kwargs)\r\n\r\n def changeDescription(self, value):\r\n print(\"ImgData2.changeDescription()\", value)\r\n f_descBox = self.children[0].children[2]\r\n print(f_descBox)\r\n print(f_descBox.text)\r\n self.c_description = value\r\n print(f_descBox.text)\r\n\r\nclass ContainerBox(BoxLayout):\r\n def __init__(self, **kwargs):\r\n super(ContainerBox, self).__init__(**kwargs)\r\n\r\nclass Nested2App(App):\r\n def build(self):\r\n return ContainerBox()\r\n \r\nif __name__ == '__main__':\r\n Nested2App().run()","repo_name":"hwynn/Kivy-Examples","sub_path":"DoThing/ParentPropertyBind.py","file_name":"ParentPropertyBind.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18413643123","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, View, DetailView, CreateView, UpdateView, DeleteView\nfrom .exceptions import PermissionDenied\nfrom bulletinboard import models\nfrom .models import Post, Category, Profile\nfrom .forms import AnnouncementPostForm, LoginForm, SignupForm, UpdateProfileForm, CommentForm\n\n\nclass HomePageView(ListView):\n model = Post\n template_name = 'bulletinboard/index.html'\n context_object_name = \"latest_announcements\"\n\n def get_queryset(self):\n return models.Post.objects.annotate()[:8]\n\n\nclass AnnouncementsPageView(View):\n template_name = 'bulletinboard/announcements.html'\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n context = {}\n posts = models.Post.objects.annotate()\n context['announcements'] = posts\n return render(request, self.template_name, context)\n else:\n return render(request, self.template_name)\n\n\nclass AnnouncementView(DetailView):\n model = Post\n pk_url_kwarg = 'post_id'\n comment_form = CommentForm\n template_name = 'bulletinboard/announcement_detail.html'\n\n def get(self, request, post_id, *args, **kwargs):\n self.object = self.get_object()\n context = self.get_context_data(object=self.object)\n context['comments'] = models.Comment.objects.filter(in_post__pk=post_id).order_by('-date_publish')\n context['comment_form'] = None\n if request.user.is_authenticated:\n context['comment_form'] = self.comment_form\n return self.render_to_response(context)\n\n @method_decorator(login_required)\n def post(self, request, post_id, *args, **kwargs):\n post = get_object_or_404(Post, pk=post_id)\n form = self.comment_form(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.date_publish = timezone.now()\n comment.author = request.user\n comment.in_post = post\n comment.save()\n return render(request=request, template_name=self.template_name, context={'comment_form': self.comment_form,\n 'post': post,\n 'comments': post.comment_set.order_by(\n '-date_publish')})\n else:\n return render(request=request, template_name=self.template_name, context={'comment_form': form,\n 'post': post,\n 'comments': post.comment_set.order_by(\n '-date_publish')})\n\n\nclass CategoryView(View):\n template_name = 'bulletinboard/category.html'\n\n def get(self, request, *args, **kwargs):\n context = {}\n category = models.Category.objects.annotate()\n context['categories_list'] = category\n return render(request, self.template_name, context)\n\n\nclass AnnouncementCategoryView(View):\n pk_url_kwarg = 'category_id'\n template_name = 'bulletinboard/announcement_category.html'\n\n def get(self, request, category_id, *args, **kwargs):\n context = {}\n category = models.Post.objects.filter(category__pk=category_id)\n context['announcement_categories'] = category\n return render(request, self.template_name, context)\n\n\nclass CreateAnnouncementView(CreateView):\n form_class = AnnouncementPostForm\n template_name = 'bulletinboard/create_announcement.html'\n\n @method_decorator(login_required)\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST, request.FILES)\n context = {}\n if form.is_valid():\n post = form.save(commit=False)\n post.published_date = timezone.now()\n post.author = request.user\n 
post.save()\n context['post_was_created'] = True\n context['form'] = self.form_class\n return render(request=request, template_name=self.template_name, context=context)\n else:\n context['post_was_created'] = False\n context['form'] = form\n return render(request=request, template_name=self.template_name, context=context)\n\n\nclass EditAnnouncementView(UpdateView):\n model = models.Post\n pk_url_kwarg = 'post_id'\n template_name = 'bulletinboard/edit_announcement.html'\n form_class = AnnouncementPostForm\n\n def dispatch(self, request, *args, **kwargs):\n obj = self.get_object()\n if obj.author != self.request.user:\n raise PermissionDenied(\"You are not author of this post\")\n return super(EditAnnouncementView, self).dispatch(request, *args, **kwargs)\n\n def get_success_url(self):\n post_id = self.kwargs['post_id']\n return reverse('bulletinboard:announcement_detail', args=(post_id, ))\n\n\nclass DeleteAnnouncementView(DeleteView):\n model = models.Post\n pk_url_kwarg = 'post_id'\n template_name = 'bulletinboard/delete_announcement.html'\n\n def get_success_url(self):\n post_id = self.kwargs['post_id']\n return reverse('bulletinboard:delete_post_success', args=(post_id,))\n\n\nfrom django.contrib.auth.views import LoginView\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import Http404\n\n\nclass LoginView(LoginView):\n template_name = 'my_auth/login.html'\n form_class = LoginForm\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(data=request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect(reverse('bulletinboard:home'), request)\n else:\n context = {}\n context['form'] = form\n return render(request=request, template_name=self.template_name, context=context)\n else:\n context = {'form': form}\n return render(request=request, template_name=self.template_name, context=context)\n\n\nclass SignupView(View):\n template_name = 'my_auth/signup.html'\n registration_form = SignupForm\n\n def get(self, request, *args, **kwargs):\n context = {'form': self.registration_form}\n return render(request=request, template_name=self.template_name, context=context)\n\n def post(self, request, *args, **kwargs):\n user_form = SignupForm(data=request.POST)\n registered = False\n if user_form.is_valid():\n user = user_form.save(commit=True)\n user.email = user_form.cleaned_data['email']\n user.save()\n registered = True\n return render(request, 'my_auth/signup.html',\n {'registered': registered})\n else:\n return render(request, 'my_auth/signup.html',\n {'form': user_form,\n 'registered': registered})\n\n\n@login_required\ndef logout_view(request):\n logout(request)\n return redirect(reverse(\"bulletinboard:home\"))\n\n\nclass ProfileView(DetailView):\n model = Profile\n template_name = 'bulletinboard/profile.html'\n\n def get_object(self):\n return get_object_or_404(Profile, user__id=self.kwargs['user_id'])\n\n\nclass EditProfileView(UpdateView):\n model = Profile\n form_class = UpdateProfileForm\n template_name = 'bulletinboard/edit_profile.html'\n slug_field = \"user_id\"\n slug_url_kwarg = \"user_id\"\n\n def dispatch(self, request, *args, **kwargs):\n obj = self.get_object()\n if obj.user != self.request.user:\n raise Http404(\"It is not your profile!\")\n return super(EditProfileView, self).dispatch(request, *args, **kwargs)\n\n def get_success_url(self):\n user_id 
= self.kwargs['user_id']\n return reverse('bulletinboard:profile', args=(user_id,))\n","repo_name":"Vershinin100797/Django-Home-Project","sub_path":"bulletinboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70016252327","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('formacion', '0039_auto_20160101_1526'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='revisioninterventoriadocentesoporte',\n name='evidencia',\n ),\n migrations.AddField(\n model_name='revisioninterventoriadocentesoporte',\n name='evidencia',\n field=models.ForeignKey(blank=True, to='formacion.EvidenciaDocentes', null=True),\n ),\n migrations.RemoveField(\n model_name='revisioninterventoriaescuelaticsoporte',\n name='evidencia',\n ),\n migrations.AddField(\n model_name='revisioninterventoriaescuelaticsoporte',\n name='evidencia',\n field=models.ForeignKey(blank=True, to='formacion.EvidenciaEscuelaTic', null=True),\n ),\n ]\n","repo_name":"Dandresfsoto/Andes","sub_path":"formacion/migrations/0040_auto_20160101_1538.py","file_name":"0040_auto_20160101_1538.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"23201716694","text":"def swap_case(s):\n S = \"\"\n for i in s:\n S= S+\"\".join([i.upper() if i.islower() else i.lower()])\n return S\n\nif __name__ == '__main__':\n s = input()\n result = swap_case(s)\n print(result)\n\n\"\"\"\nEditorial by DOSHI\nUse the method string.swapcase(s) to swap lower case letters to upper case letters and vice versa.\n\n\nTo learn more, visit:\nhttps://docs.python.org/2/library/string.html#string.swapcase\n\nSet by DOSHI\nProblem Setter's code:\n\nimport string\nprint string.swapcase(raw_input())\n\"\"\"\n","repo_name":"laziestcoder/Python_HR_Codes","sub_path":"3 Strings/sWAP cASE.py","file_name":"sWAP cASE.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"74549547367","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time,sys\n\ndef genCategory(cat, catArrays):\n\tif cat==None:\n\t\treturn None\n\telse:\n\t\tfor c in catArrays.keys():\n\t\t\tif cat in catArrays[c]:\n\t\t\t\treturn c\n\ndef parseVenue(full_venue, catArrays, tileID, query):\n\t\n\ttry:\n\t\tcreatedAt = time.gmtime(full_venue[\"createdAt\"])\n\texcept:\n\t\tcreatedAt = 0\n\n\ttry:\n\t\tc= full_venue['categories'][0]['name']\n\texcept:\n\t\tc = 'None'\n\n\tif 'price' in full_venue.keys():\n\t\tprice = str(full_venue['price']['tier'])\n\telse:\n\t\tprice = '-1'\n\n\tif 'rating' in full_venue.keys():\n\t\trating = str(full_venue['rating'])\n\telse:\n\t\trating = '-1'\n\n\tif \"tags\" in full_venue.keys():\n\t\ttags = ';'.join(full_venue['tags'])\n\telse:\n\t\ttags = ''\n\n\tif 'description' in full_venue.keys():\n\t\tdescription = full_venue['description']\n\telse:\n\t\tdescription = ''\n\n\n\n\n\tvenueDict = { 'genCategory': genCategory(c,catArrays),\n\t\t'category': c.encode('utf-8'),\n\t\t'name': full_venue['name'].encode('utf-8'),\n\t \t'lon': full_venue['location']['lng'],\n\t \t'lat': full_venue['location']['lat'],\n\t \t'checkIns': full_venue['stats']['checkinsCount'],\n\t \t'tips': full_venue['stats']['tipCount'],\n\t \t'users': full_venue['stats']['usersCount'],\n\t \t'createdAt': time.strftime(\"%Y.%m.%d \", createdAt),\n\t \t'tileID': tileID,\n\t \t'ID':full_venue['id'].encode('utf-8'),\n\t \t'query':query.encode('utf-8'),\n\t \t'time':time.strftime(\"%Y.%m.%d %H:%M:%S \"),\n\t \t'verified': str(full_venue['verified']),\n\t \t'price': price,\n\t \t'rating': rating,\n\t \t'tags': tags.encode('utf-8'),\n\t \t'photoCount': str(full_venue['photos']['count']),\n\t \t'description': description.encode('utf-8')\n\t \t}\n\n\treturn venueDict\n\n","repo_name":"Casyfill/Square_cities","sub_path":"scraper/misc/parseVenue.py","file_name":"parseVenue.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"39062381708","text":"import aiosqlite\n\nasync def db_create():\n async with aiosqlite.connect(\"main.db\") as db:\n async with db.cursor() as cursor:\n await cursor.execute('CREATE TABLE IF NOT EXISTS main (channel_id INTEGER , guild_id INTEGER)')\n await db.commit()\n\nasync def get_guild_ids():\n guild_ids = []\n async with aiosqlite.connect('main.db') as db:\n async with db.execute('SELECT guild_id, channel_id FROM main WHERE channel_id IS NOT NULL') as cursor:\n async for row in cursor:\n guild_ids.append((row[0], row[1]))\n return guild_ids\n\nasync def add_guild(guild_id: int): \n async with aiosqlite.connect('main.db') as db:\n async with db.execute('SELECT guild_id FROM main WHERE guild_id = ?', (guild_id.id,)) as cursor:\n result = await cursor.fetchone()\n if result is None:\n await db.execute('INSERT INTO main(guild_id, channel_id) VALUES(?, ?)', (guild_id.id, None))\n await db.commit()\n\nasync def get_channel_id(guild_id):\n if isinstance(guild_id, tuple):\n guild_id = str(guild_id[0])\n else:\n guild_id = str(guild_id)\n async with aiosqlite.connect('main.db') as db:\n async with db.execute('SELECT channel_id FROM main WHERE guild_id = ?', (guild_id,)) as cursor:\n result = await cursor.fetchone()\n if result is None:\n return None\n else:\n return result[0]\n","repo_name":"hope61/Freebie-epicgames-checker","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20760514853","text":"\ndef testIterations():\n words = ['cat', 'window', 'door']\n #Take a copy of the words before\n for word in words[: ]:\n if word == 'window' :\n print(\"Equal\")\n words.append(\"hese\")\n print(word);\n\n for i in range(len(words)) :\n print(\"i: \", i , \"data: \", words[i])","repo_name":"mitso23/training","sub_path":"python/basictraining/controlFlow.py","file_name":"controlFlow.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37425616307","text":"import time\r\nimport cv2\r\n\r\nDEFAULT_BASE_DIR: str = 'resources'\r\n\r\n\r\nclass ContinuousPhotoCapture:\r\n def __init__(self):\r\n # Initialize the OpenCV video capture object\r\n self.camera = cv2.VideoCapture(0)\r\n # Set the resolution\r\n self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\r\n self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\r\n self.running = False\r\n\r\n def capture_photos(self, photos_to_capture):\r\n captured_count = 0 # Counter for captured photos\r\n while captured_count < photos_to_capture:\r\n ret, frame = self.camera.read()\r\n if not ret:\r\n continue\r\n filename = time.strftime(DEFAULT_BASE_DIR + f\"/photo_{captured_count}_%Y%m%d%H%M%S.jpg\")\r\n cv2.imwrite(filename, frame)\r\n captured_count += 1\r\n\r\n\r\nif __name__ == \"__main__\":\r\n capture_instance = ContinuousPhotoCapture()\r\n capture_instance.capture_photos(photos_to_capture=3)\r\n","repo_name":"MosheNamdar/alert_aware","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15319406124","text":"import cv2\nimport matplotlib.pyplot as plt\n\n\nimg_path = '/Users/kimwanki/developer/ML/imgFile/9.jpeg'\nxml_path = '/Users/kimwanki/developer/ML/imgFile/9.xml'\n\na = cv2.imread(img_path)\na = cv2.rotate(a , cv2.ROTATE_90_COUNTERCLOCKWISE)\nprint(a.shape)\n# plt.imshow(a)\n# plt.show()\n\nlocation_list = []\n\nwith open(xml_path,'r') as xml_read:\n # print(xml_read.readlines())\n for i in xml_read.readlines():\n if 'ymin' in i:\n x = i.replace('\\t','').replace('\\n','').replace('','').replace('','')\n location_list.append(int(x))\n elif 'xmin' in i:\n x = i.replace('\\t','').replace('\\n','').replace('','').replace('','')\n location_list.append(int(x))\n elif 'ymax' in i:\n x = i.replace('\\t', '').replace('\\n', '').replace('', '').replace('', '')\n location_list.append(int(x))\n elif 'xmax' in i:\n x = i.replace('\\t', '').replace('\\n', '').replace('', '').replace('', '')\n location_list.append(int(x))\n\na = a[location_list[1]:location_list[3],location_list[0]:location_list[2],:]\nplt.imshow(a)\nplt.show()\n\n# lotation =\n\nprint(location_list)\n\n\n\n\n\n\n","repo_name":"KimWanki/wkkim_ML","sub_path":"detection/loadImg.py","file_name":"loadImg.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"34988562325","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 23 23:05:44 2021\r\n\r\n@author: Caroline\r\n\"\"\"\r\n#FOR\r\n#W= escribir o crear si no existe\r\n\r\nimport csv\r\n\r\ndoc=open(\"archivo.csv\", \"w\")\r\n\r\n#convención\r\ndoc_csv_w= csv.writer(doc)\r\n\r\nlista= [[\"Coffee\", 1528], [\"Nana\", 2583], [\"louis\", 5312], [\"Jaime\", 1024]]\r\n\r\nfor x in lista:\r\n doc_csv_w.writerow(x)\r\n\r\ndoc.close()","repo_name":"carolineprada/Python-Avanced","sub_path":"create_for.py","file_name":"create_for.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"21136232475","text":"\"\"\"Module to perform predictions over the test set.\"\"\"\n\nfrom typing import Dict, List, Tuple\nimport logging\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\n\nfrom .utils.dataset_conversion import create_test_datasets\nfrom .utils.train_tools import extract_number_features_model\n\nlogger = logging.getLogger(__name__)\n\n\ndef smape(predictions: List, truth: List) -> float:\n\n accuracy = (\n 1\n - abs(\n np.nanmean(abs(np.squeeze(predictions) - truth))\n / np.mean(np.squeeze(predictions) + truth)\n )\n ) * 100\n return accuracy\n\n\ndef predict_test(\n model: tf.keras.models.Model, data: Dict[str, pd.DataFrame], params: Dict\n) -> Tuple[Dict[str, pd.DataFrame], Dict[str, pd.DataFrame]]:\n \"\"\"Function to perform model predictions over data.\n\n Parameters\n ----------\n model : tf.keras.models.Model\n Model to be used for predictions.\n data : Dict[str, pd.DataFrame]\n It is a dictionary of ids: pd.DataFrame containing\n test time series.\n params : Dict\n dictionary containing the relevant hyperparameters and configurations.\n\n Returns\n -------\n keys : List[str]\n The list of all ids present in the data\n acc_ad_spend_d7 : List[float]\n The list of all ad_spend accuracies d7 per id\n acc_revenue_d7 : List[float]\n The list of all revenue accuracies d7 per id\n acc_ad_spend_d3 : List[float]\n The list of all ad_spend accuracies d3 per id\n acc_revenue_d3 : List[float]\n The list of all revenue accuracies d3 per id\n \"\"\"\n # Get the number of features\n n_total_features, n_deterministic_features = extract_number_features_model(model)\n n_aleatoric_features = len(params[\"aleatoric_features\"])\n\n logger.debug(\n f\"\"\"- n total features: {n_total_features}\n - n deterministic features: {n_deterministic_features}\n - n_aleatoric features: {n_aleatoric_features}\n\n - aleatoric_features: {params[\"aleatoric_features\"]}\"\"\"\n )\n\n # Create the test datasets\n test_datasets = create_test_datasets(\n data,\n n_deterministic_features=n_deterministic_features,\n window_size=params[\"window_len\"],\n forecast_size=params[\"forecast_len\"],\n batch_size=params[\"batch_size\"],\n )\n\n # This is a loop over one element only, but for the structure of tf datasets it is necessary.\n # we calculate the accuracy of revenues and adspend per id\n keys = list(data.keys())\n predictions, true_values = {}, {}\n acc_ad_spend_d7, acc_revenue_d7, acc_ad_spend_d3, acc_revenue_d3 = [], [], [], []\n\n for i, elem in enumerate(test_datasets):\n for (past, future), (truth_adspend, truth_revenues) in elem:\n predictions[keys[i]] = model.predict((past, future))\n true_values[keys[i]] = (truth_adspend, truth_revenues)\n acc_ad_spend_d7.append(smape(predictions[keys[i]][0], truth_adspend))\n acc_revenue_d7.append(smape(predictions[keys[i]][1], truth_revenues))\n acc_ad_spend_d3.append(\n smape(predictions[keys[i]][0][:, :3, :], truth_adspend[:, :3])\n )\n acc_revenue_d3.append(\n smape(predictions[keys[i]][1][:, :3, :], truth_revenues[:, :3])\n )\n\n return keys, acc_ad_spend_d7, acc_revenue_d7, acc_ad_spend_d3, acc_revenue_d3\n\n\ndef accuracy_calculation(\n model: tf.keras.models.Model, data: Dict[str, pd.DataFrame], params: Dict\n) -> pd.DataFrame:\n \"\"\"Function to perform model predictions over data.\n\n Parameters\n ----------\n model : tf.keras.models.Model\n Model to be used for predictions.\n data : Dict[str, pd.DataFrame]\n It is a dictionary of ids: pd.DataFrame containing\n test time series.\n params : Dict\n dictionary containing 
the relevant hyperparameters and configurations.\n\n Returns\n -------\n acc_df: a DataFrame containing all the accuracies per id\n \"\"\"\n logger.debug(\"Calculating the accuracy\")\n (\n keys,\n acc_ad_spend_d7,\n acc_revenue_d7,\n acc_ad_spend_d3,\n acc_revenue_d3,\n ) = predict_test(model=model, data=data, params=params)\n\n logger.debug(\"Creation of the dataframe\")\n\n acc_df = pd.DataFrame()\n acc_df[\"id\"] = keys\n acc_df[\"acc_ad_spend\"] = acc_ad_spend_d7\n acc_df[\"acc_revenues\"] = acc_revenue_d7\n acc_df[\"acc_roas\"] = acc_df[[\"acc_ad_spend\", \"acc_revenues\"]].mean(axis=1)\n acc_df[\"acc_ad_spend_suggested\"] = acc_ad_spend_d7\n acc_df[\"acc_revenues_suggested\"] = acc_revenue_d7\n acc_df[\"acc_roas_suggested\"] = acc_df[\n [\"acc_ad_spend_suggested\", \"acc_revenues_suggested\"]\n ].mean(axis=1)\n acc_df[\"acc_ad_spend_d3\"] = acc_ad_spend_d3\n acc_df[\"acc_revenues_d3\"] = acc_revenue_d3\n acc_df[\"acc_roas_d3\"] = acc_df[[\"acc_ad_spend_d3\", \"acc_revenues_d3\"]].mean(axis=1)\n # acc_df.to_csv(\"data/07_model_output/neural_network/predictions_accuracy.csv\", index=False)\n return acc_df\n","repo_name":"odartsi/Neural_Network_Timeseries","sub_path":"accuracy_calculation.py","file_name":"accuracy_calculation.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25350921842","text":"from keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\ndef keras_tokenizer(texts, num_words=None, mode=None, maxlen=None, sequences=False):\n \"\"\"Preprocess text with Keras Tokenizer.\n\n Args:\n texts (list, str): Collections of documents to preprocess.\n num_words (int): Length of the vocabulary.\n mode (str): Can be \"count\" or \"tfidf\".\n\n Returns:\n encoded_docs (array, int | float): Can be Bag-of-Words or TF-IDF weight matrix, depending\n on \"mode\".\n\n \"\"\"\n t = Tokenizer(filters='!\"#$%&()*+,-./:;<=>?@[\\]^`{|}~ ', num_words=num_words)\n t.fit_on_texts(texts)\n\n if sequences:\n seq = t.texts_to_sequences(texts)\n encoded_docs = pad_sequences(seq, maxlen=maxlen)\n else:\n encoded_docs = t.texts_to_matrix(texts, mode=mode)\n\n print('Documents count: {}'.format(t.document_count))\n print('Found %s unique tokens.' % len(t.word_index))\n print('Shape of encoded docs: {}'.format(encoded_docs.shape))\n\n return encoded_docs\n","repo_name":"nestauk/clio-train","sub_path":"src/clio/text_preprocessing.py","file_name":"text_preprocessing.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1057531781","text":"import sys\n\nwhile True:\n array = list(map(int, sys.stdin.readline().split()))\n\n array.sort()\n\n if array == [0, 0, 0]:\n break\n maximum = array.pop()\n\n sum = array[0] ** 2 + array[1] ** 2\n\n if maximum ** 2 == sum:\n print(\"right\")\n continue\n print(\"wrong\")","repo_name":"hydenny/coding-test-practice","sub_path":"백준 코딩테스트 문제풀이/기본 수학2/직각삼각형.py","file_name":"직각삼각형.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15805472924","text":"import os\nimport pandas as pd\nimport gc\nimport sys\nfrom datetime import datetime, timedelta, date\nfrom babel.dates import format_date\npd.options.mode.chained_assignment = None\n\nif __name__ == '__main__':\n try:\n # add parent directory to path so helpers file can be referenced\n sys.path.append(os.path.dirname((os.path.dirname(__file__))))\n from helpers import *\n\n # set working directory, change if necessary\n os.chdir(os.path.dirname(__file__))\n\n # set source data\n url = 'https://raw.githubusercontent.com/jgehrcke/covid-19-germany-gae/master/more-data/7di-rki-by-ags.csv'\n if not os.path.exists('data'):\n os.makedirs('data')\n with open(os.path.join('data', '7di-rki-by-ags.csv'), 'wb') as f:\n f.write(download_data(url).read())\n\n # read data and transpose\n df = pd.read_csv(\n './data/7di-rki-by-ags.csv', encoding='utf-8', index_col=0).transpose()\n dftable = pd.read_csv(\n './pop_ags.csv', encoding='utf-8', index_col=0)\n\n # get current date for chart notes\n timestamp_str = df.columns[-1]\n timestamp_dt = datetime.strptime(\n timestamp_str, '%Y-%m-%dT%H:%M:%S+%f') + timedelta(days=1)\n timestamp_str = timestamp_dt.strftime('%-d. %-m. %Y')\n timestamp_str2 = format_date(timestamp_dt, 'd. MMMM', locale='de_DE')\n\n # clean AGS and add leading zero to numerical AGS values with <5 characters\n df.index = df.index.str.replace('_7di', '')\n df.index = df.index.str.rjust(5, '0')\n df.index = df.index.rename('ID')\n dftable.index = dftable.index.astype(str)\n dftable.index = dftable.index.str.rjust(5, '0')\n\n # sort by AGS\n df.sort_values(by=['ID'], inplace=True)\n\n # remove last row with German incidence and Berlin districts\n df = df[:-1]\n df = df.drop(df.index[325:337])\n\n # create new df for choropleth map\n dfmap = df[[]].copy()\n dfedumap = df[[]].copy()\n\n # Notbremse on the 22th of April in dfmap?\n # starting from the the 20th of April, comes into effect two days later\n dfmap['Wert'] = 'keine Notbremse'\n dfmap['Wert'][(df.iloc[:, 414] > 100) & (\n df.iloc[:, 413] > 100) & (df.iloc[:, 412] > 100)] = 'Notbremse'\n\n # Notbremse for schools on the 22th of April in dfedumap?\n # starting from the the 20th of April, comes into effect two days later\n dfedumap['Wert'] = 'Schulen offen'\n dfedumap['Wert'][(df.iloc[:, 414] > 165) & (\n df.iloc[:, 413] > 165) & (df.iloc[:, 412] > 165)] = 'Schulen geschlossen'\n\n # Notbremse on the 22th of April in dftable?\n # starting from the the 20th of April, comes into effect two days later\n dftable['Notbremse'] = '\t\t\t\t✖\t\t\t\t' # with \t\t\t\tblank\n dftable['Notbremse'][(df.iloc[:, 414] > 100) & (\n df.iloc[:, 413] > 100) & (df.iloc[:, 412] > 100)] = '\t\t\t\t✔\t\t\t\t' # with \t\t\t\tblank\n\n # When does the Notbremse come into effect? 
(placeholder)\n # dfmap['Gilt ab'] = 'current day + 2 days'\n\n # calculate current status of Notbremse since 23th of April\n # update every day (i)\n for i in range(415, df.shape[1]):\n # update every AGS (j)\n for j in range(dfmap.shape[0]):\n if dfmap['Wert'][j] == 'Notbremse':\n if (df.iloc[j, i] < 100) & (df.iloc[j, i-1] < 100) & (df.iloc[j, i-2] < 100) & (df.iloc[j, i-3] < 100) & (df.iloc[j, i-4] < 100):\n dfmap['Wert'][j] = 'keine Notbremse'\n dftable['Notbremse'][j] = '\t\t\t\t✖\t\t\t\t'\n # dfmap['Gilt ab'][j] = df.columns[i]\n if dfedumap['Wert'][j] == 'Schulen geschlossen':\n if (df.iloc[j, i] < 165) & (df.iloc[j, i-1] < 165) & (df.iloc[j, i-2] < 165) & (df.iloc[j, i-3] < 165) & (df.iloc[j, i-4] < 165):\n dfedumap['Wert'][j] = 'Schulen offen'\n # dfmap['Gilt ab'][j] = df.columns[i]\n else:\n if (df.iloc[j, i] > 100) & (df.iloc[j, i-1] > 100) & (df.iloc[j, i-2] > 100):\n dfmap['Wert'][j] = 'Notbremse'\n dftable['Notbremse'][j] = '\t\t\t\t✔\t\t\t\t'\n # dfmap['Gilt ab'][j] = df.columns[i]\n if (df.iloc[j, i] > 165) & (df.iloc[j, i-1] > 165) & (df.iloc[j, i-2] > 165):\n dfedumap['Wert'][j] = 'Schulen geschlossen'\n # dfmap['Gilt ab'][j] = df.columns[i]\n\n # add current incidence in df of table chart\n dftable['Inzidenz_tmp'] = df.iloc[:, -1]\n\n # calculate trend and add to new Inzidenz column\n dftable['Trend'] = (\n ((df.iloc[:, -1] - df.iloc[:, -3]) / df.iloc[:, -3]) * 100)\n dftable['Trend_arrow'] = ''\n dftable['Trend_arrow'][(dftable['Trend'] < -5)] = ' ↓'\n dftable['Trend_arrow'][(dftable['Trend'] > 5)] = ' ↑'\n dftable['Inzidenz_tmp'] = dftable['Inzidenz_tmp'].round(\n 0).astype(int).clip(lower=0) # trim negative numbers\n\n dftable['Inzidenz'] = dftable['Inzidenz_tmp'].astype(\n str) + dftable['Trend_arrow']\n\n # sort dftable by pop\n dftable.sort_values(\n dftable.columns[1], inplace=True, ascending=False)\n\n # drop some columns and delete old df from memory\n del [[df]]\n del dftable['Trend']\n del dftable['Trend_arrow']\n del dftable['Inzidenz_tmp']\n del dftable['Bewohner']\n gc.collect()\n\n # drop AGS and make region name the row index\n dftable.set_index('Region', inplace=True)\n\n # change order of columns\n dftable = dftable[['Inzidenz', 'Notbremse']]\n\n # number of regions with Notbremse on\n notbremse = (dfmap['Wert'] == 'Notbremse').sum()\n # calculate difference between 24th of April and now\n notbremse_diff = 367 - notbremse\n notbremse_diff = notbremse_diff.astype(str)\n notbremse = (dfmap['Wert'] == 'Notbremse').sum().astype(str)\n\n # number of regions with schools closed\n schools = (dfedumap['Wert'] == 'Schulen geschlossen').sum().astype(str)\n\n # set chart titles and notes\n # title_map = notbremse + ' Regionen sind derzeit von der Notbremse betroffen'\n subtitle_chart = 'Am ' + timestamp_str2 + ' lagen ' + \\\n notbremse + ' Kreise und Städte 3 Tage in Folge über dem Inzidenzwert von 100 und noch keine 5 Tage in Folge darunter - das sind ' + \\\n notbremse_diff + ' weniger als noch Ende April'\n subtitle_chart2 = 'Am ' + timestamp_str2 + ' lagen ' + \\\n schools + ' Kreise und Städte 3 Tage in Folge über dem Inzidenzwert von 165 und noch keine 5 Tage in Folge darunter'\n notes_chart = 'Die Grafik zeigt, ob die Notbremse gemäss RKI-Inzidenz (demnächst) greift, nicht ob sie vor Ort bereits in Kraft ist. Der Berechnung liegen korrigierte Werte zugrunde inklusive Nachmeldungen. Stand: ' + \\\n timestamp_str\n notes_chart2 = 'Der Berechnung liegen korrigierte Inzidenzwerte zugrunde inklusive Nachmeldungen. 
Stand: ' + \\\n timestamp_str\n\n # insert id manually and run function\n update_chart(id='530c9a2b291a3ac848e9dc471a762204',\n data=dfmap, notes=notes_chart, subtitle=subtitle_chart)\n update_chart(id='4c6603c3b465e2d11eb5b22d736dadcc',\n data=dfedumap, notes=notes_chart2, subtitle=subtitle_chart2)\n update_chart(id='050befd50ccb2f5f9080d4bba4df423d',\n data=dftable, notes=notes_chart, subtitle=subtitle_chart)\n\n except:\n raise\n finally:\n f.close()\n","repo_name":"nzzdev/st-methods","sub_path":"bots/corona-charts-de/rki_notbremse/rki_notbremse.py","file_name":"rki_notbremse.py","file_ext":"py","file_size_in_byte":7916,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"53"}
+{"seq_id":"42730675592","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 2 16:06:03 2020\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport cv2\r\nfrom scipy.ndimage.interpolation import map_coordinates\r\nfrom scipy.ndimage.filters import gaussian_filter\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#%%\r\ndef elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):\r\n \"\"\"Elastic deformation of images as described in [Simard2003]_ (with modifications).\r\n .. [Simard2003] Simard, Steinkraus and Platt, \"Best Practices for\r\n Convolutional Neural Networks applied to Visual Document Analysis\", in\r\n Proc. of the International Conference on Document Analysis and\r\n Recognition, 2003.\r\n\r\n Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5\r\n \"\"\"\r\n if random_state is None:\r\n random_state = np.random.RandomState(None)\r\n\r\n shape = image.shape\r\n shape_size = shape[:2]\r\n \r\n # Random affine\r\n center_square = np.float32(shape_size) // 2\r\n square_size = min(shape_size) // 3\r\n pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])\r\n pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)\r\n M = cv2.getAffineTransform(pts1, pts2)\r\n image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)\r\n\r\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\r\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\r\n dz = np.zeros_like(dx)\r\n\r\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\r\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))\r\n\r\n return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)\r\n\r\n#%%\r\n\r\n# Define function to draw a grid\r\n# def draw_grid(im, grid_size):\r\n# # Draw grid lines\r\n# for i in range(0, im.shape[1], grid_size):\r\n# cv2.line(im, (i, 0), (i, im.shape[0]), color=(255,))\r\n# for j in range(0, im.shape[0], grid_size):\r\n# cv2.line(im, (0, j), (im.shape[1], j), color=(255,))\r\n\r\n# # Load images\r\n# im = cv2.imread(\"../input/train/10_1.tif\", -1)\r\n# im_mask = cv2.imread(\"../input/train/10_1_mask.tif\", -1)\r\n\r\n# # Draw grid lines\r\n# draw_grid(im, 50)\r\n# draw_grid(im_mask, 50)\r\n\r\n# # Merge images into separete channels (shape will be (cols, rols, 2))\r\n# im_merge = np.concatenate((im[...,None], im_mask[...,None]), axis=2)\r\n\r\n#%%\r\nim_merge = cv2.imread(\"./000465_image.png\")\r\nim1 = cv2.imread(\"./test.png\")\r\na = np.array(im_merge)\r\nb = np.array(im1)\r\nprint(a.shape,b.shape)\r\n# a = a[::,:240:,::]\r\nprint(a.shape,b.shape)\r\nim_merge_t = elastic_transform(a, im_merge.shape[1] * 2.8, im_merge.shape[1] * 0.07, im_merge.shape[1] * 0.09)\r\n\r\ncv2.imshow(\"123\",im_merge_t)\r\ncv2.imshow(\"123ori\",a)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n\r\n","repo_name":"ShinewineW/LearningSmth","sub_path":"Cv2Learning/elastic_transform/elastic_transform.py","file_name":"elastic_transform.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"12596162998","text":"import sys\n\nfrom parser import Parser\nfrom filler import Polygon, Border\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n print('provide .igs file!')\n sys.exit(0)\n\n parser = Parser(sys.argv[1])\n lines = parser.lines\n\n if not lines:\n print('no lines found! check file')\n sys.exit(0)\n\n border = Border(lines)\n polygon = Polygon(lines)\n\n polygon.fill_ploygon(border.boundary)\n\n assert polygon.rectangles\n\n polygon.print_rectangles('out.txt')","repo_name":"ghost211221/polygon_filler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26474206667","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('rating', views.rating, name='rating'),\n path('calc', views.calc, name='calc'),\n path('test', views.test, name='test'),\n path('result', views.result, name='result'),\n]","repo_name":"anandmate/Dataware","sub_path":"quiz/application/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4380819654","text":"import os\nimport win32com.client\nimport openpyxl\nimport random\nfrom openpyxl import *\nimport re\nimport time\nimport ReadExcel\nimport scipy\nimport numpy\nfrom scipy.stats import norm\nimport datetime\nimport matplotlib.pyplot as plt\n\n\ndef build_floor_plan_and_bracing(SapModel, tower, all_floor_plans, all_floor_bracing, floor_num, floor_elev):\n print('Building floor plan...')\n floor_plan_num = tower.floor_plans[floor_num-1]\n floor_plan = all_floor_plans[floor_plan_num-1]\n #Create members for floor plan\n for member in floor_plan.members:\n kip_in_F = 3\n SapModel.SetPresentUnits(kip_in_F)\n start_node = member.start_node\n end_node = member.end_node\n start_x = start_node[0]\n start_y = start_node[1]\n start_z = floor_elev\n end_x = end_node[0]\n end_y = end_node[1]\n end_z = floor_elev\n section_name = member.sec_prop\n [ret, name] = SapModel.FrameObj.AddByCoord(start_x, start_y, start_z, end_x, end_y, end_z, PropName=section_name)\n if ret != 0:\n print('ERROR creating floor plan member on floor ' + str(floor_num))\n #assign masses to mass nodes and create steel rod\n mass_node_1 = floor_plan.mass_nodes[0]\n mass_node_2 = floor_plan.mass_nodes[1]\n floor_mass = tower.floor_masses[floor_num-1]\n mass_per_node = floor_mass/2\n #Create the mass node point\n [ret, mass_name_1] = SapModel.PointObj.AddCartesian(mass_node_1[0],mass_node_1[1],floor_elev,MergeOff=False)\n if ret != 0:\n print('ERROR setting mass nodes on floor ' + str(floor_num))\n [ret, mass_name_2] = SapModel.PointObj.AddCartesian(mass_node_2[0],mass_node_2[1],floor_elev,MergeOff=False)\n if ret != 0:\n print('ERROR setting mass nodes on floor ' + str(floor_num))\n #Assign masses to the mass nodes\n #Shaking in the x direcion!\n N_m_C = 10\n SapModel.SetPresentUnits(N_m_C)\n ret = SapModel.PointObj.SetMass(mass_name_1, [mass_per_node,0,0,0,0,0],0,True,False)\n if ret[0] != 0:\n print('ERROR setting mass on floor ' + str(floor_num))\n ret = SapModel.PointObj.SetMass(mass_name_2, [mass_per_node,0,0,0,0,0])\n if ret[0] != 0:\n print('ERROR setting mass on floor ' + str(floor_num))\n #Create steel rod\n kip_in_F = 3\n SapModel.SetPresentUnits(kip_in_F)\n [ret, name1] = SapModel.FrameObj.AddByCoord(mass_node_1[0], mass_node_1[1], floor_elev, mass_node_2[0], mass_node_2[1], floor_elev, PropName='Steel rod')\n if ret !=0:\n print('ERROR creating steel rod on floor ' + str(floor_num))\n #Create floor load forces\n N_m_C = 10\n SapModel.SetPresentUnits(N_m_C)\n ret = SapModel.PointObj.SetLoadForce(mass_name_1, 'DEAD', [0, 0, mass_per_node*9.81, 0, 0, 0])\n ret = SapModel.PointObj.SetLoadForce(mass_name_2, 'DEAD', [0, 0, mass_per_node*9.81, 0, 0, 0])\n #create floor bracing\n floor_bracing_num = tower.floor_bracing_types[floor_num-1]\n floor_bracing = all_floor_bracing[floor_bracing_num-1]\n #Find scaling factors\n scaling_x = floor_plan.scaling_x\n scaling_y = floor_plan.scaling_y\n #Create floor bracing\n print('Building floor bracing...')\n for member in floor_bracing.members:\n kip_in_F = 3\n SapModel.SetPresentUnits(kip_in_F)\n start_node = member.start_node\n end_node = member.end_node\n start_x = start_node[0] * scaling_x\n start_y = start_node[1] * scaling_y\n start_z = floor_elev\n end_x = end_node[0] * scaling_x\n end_y = end_node[1] * scaling_y\n end_z = floor_elev\n section_name = member.sec_prop\n [ret, name] = SapModel.FrameObj.AddByCoord(start_x, start_y, start_z, end_x, end_y, end_z, PropName=section_name)\n if ret != 0:\n print('ERROR creating floor bracing member on floor ' + 
str(floor_num))\n return SapModel\n\ndef build_face_bracing(SapModel, tower, all_floor_plans, all_face_bracing, floor_num, floor_elev):\n print('Building face bracing...')\n i = 1\n while i <= len(Tower.side):\n face_bracing_num = Tower.bracing_types[floor_num][i-1]\n face_bracing = all_face_bracing[face_bracing_num-1]\n\n #Find scaling factors\n floor_plan_num = tower.floor_plans[floor_num-1]\n floor_plan = all_floor_plans[floor_plan_num-1]\n \n scaling_x = floor_plan.scaling_x\n scaling_y = floor_plan.scaling_y\n scaling_z = tower.floor_heights[floor_num-1]\n \n for member in face_bracing.members:\n kip_in_F = 3\n SapModel.SetPresentUnits(kip_in_F)\n start_node = member.start_node\n end_node = member.end_node\n \n #Create face bracing for long side\n if i == 1 or i == 3:\n scaling_x_or_y = scaling_x\n #Create face bracing for short side\n elif i == 2 or i == 4:\n scaling_x_or_y = scaling_y\n\n start_x = start_node[0] * scaling_x_or_y\n start_y = 0\n start_z = start_node[1] * scaling_z + floor_elev\n end_x = end_node[0] * scaling_x_or_y\n end_y = 0\n end_z = end_node[1] * scaling_z + floor_elev\n section_name = member.sec_prop \n #rotate coordinate system through side 1 - 4\n if i == 1:\n ret = SapModel.CoordSys.SetCoordSys('CSys1', 0, 0, 0, 0, 0, 0)\n elif i == 2:\n ret = SapModel.CoordSys.SetCoordSys('CSys1', scaling_x, 0, 0, 90, 0, 0)\n elif i == 3:\n ret = SapModel.CoordSys.SetCoordSys('CSys1', 0, scaling_y, 0, 0, 0, 0)\n elif i == 4:\n ret = SapModel.CoordSys.SetCoordSys('CSys1', 0, 0, 0, 90, 0, 0)\n\n [ret, name] = SapModel.FrameObj.AddByCoord(start_x, start_y, start_z, end_x, end_y, end_z, ' ', section_name, ' ', 'CSys1')\n if ret != 0:\n print('ERROR creating floor bracing member on floor ' + str(floor_num))\n i += 1\n return SapModel\n\ndef set_base_restraints(SapModel):\n # Set fixed ends on all ground level nodes\n node_num = 1\n [ret, number_nodes, all_node_names] = SapModel.PointObj.GetNameList()\n for node_name in all_node_names:\n [ret, x, y, z] = SapModel.PointObj.GetCoordCartesian(node_name, 0, 0, 0)\n if z == 0:\n [ret_set_restraint, ret] = SapModel.PointObj.SetRestraint(node_name, [True, True, True, True, True, True])\n return SapModel\n\ndef define_loading(SapModel, time_history_loc, save_loc):\n print('Defining loading...')\n # Define time history function\n N_m_C = 10\n SapModel.SetPresentUnits(N_m_C)\n SapModel.Func.FuncTH.SetFromFile('GM', time_history_loc, 1, 0, 1, 2, True)\n # Set the time history load case\n N_m_C = 10\n SapModel.SetPresentUnits(N_m_C)\n SapModel.LoadCases.ModHistLinear.SetCase('GM')\n SapModel.LoadCases.ModHistLinear.SetMotionType('GM', 1)\n SapModel.LoadCases.ModHistLinear.SetLoads('GM', 1, ['Accel'], ['U1'], ['GM'], [1], [1], [0], ['Global'], [0])\n SapModel.LoadCases.ModHistLinear.SetTimeStep('GM', 250, 0.1)\n # Create load combination\n SapModel.RespCombo.Add('DEAD + GM', 0)\n SapModel.RespCombo.SetCaseList('DEAD + GM', 0, 'DEAD', 1)\n SapModel.RespCombo.SetCaseList('DEAD + GM', 0, 'GM', 1)\n # Save the model\n ret = SapModel.File.Save(save_loc)\n if ret != 0:\n print('ERROR saving SAP2000 file')\n return SapModel\n\n#returns the max acceleration in g, max drift (displacement) in mm, and weight in pounds\ndef run_analysis(SapModel):\n #Run Analysis\n print('Computing...')\n SapModel.Analyze.RunAnalysis()\n print('Finished computing.')\n #Get RELATIVE acceleration from node\n SapModel.Results.Setup.DeselectAllCasesAndCombosForOutput()\n SapModel.Results.Setup.SetComboSelectedForOutput('DEAD + GM', True)\n #set type to envelope\n 
SapModel.Results.Setup.SetOptionModalHist(1)\n #Get joint acceleration\n #Find a node that is on the top floor\n [ret, number_nodes, all_node_names] = SapModel.PointObj.GetNameList()\n z_max = 0\n z = 0\n for node_name in all_node_names:\n [ret, x, y, z] = SapModel.PointObj.GetCoordCartesian(node_name, 0, 0, 0)\n if z > z_max:\n roof_node_name = node_name\n z_max = z\n #Retrieve max accelerations\n #Set units to metres\n N_m_C = 10\n SapModel.SetPresentUnits(N_m_C)\n g = 9.81\n ret = SapModel.Results.JointAccAbs(roof_node_name, 0)\n max_and_min_acc = ret[7]\n max_pos_acc = max_and_min_acc[0]\n min_neg_acc = max_and_min_acc[1]\n if abs(max_pos_acc) >= abs(min_neg_acc):\n max_acc = abs(max_pos_acc)/g\n elif abs(min_neg_acc) >= abs(max_pos_acc):\n max_acc = abs(min_neg_acc)/g\n else:\n print('Could not find max acceleration')\n #Get joint displacement\n #Set units to millimetres\n N_mm_C = 9\n SapModel.SetPresentUnits(N_mm_C)\n ret = SapModel.Results.JointDispl(roof_node_name, 0)\n max_and_min_disp = ret[7]\n max_pos_disp = max_and_min_disp[0]\n min_neg_disp = max_and_min_disp[1]\n if abs(max_pos_disp) >= abs(min_neg_disp):\n max_drift = abs(max_pos_acc)\n elif abs(min_neg_disp) >= abs(max_pos_disp):\n max_drift = abs(min_neg_disp)\n else:\n print('Could not find max drift')\n #Get weight\n #Get base reactions\n SapModel.Results.Setup.DeselectAllCasesAndCombosForOutput()\n SapModel.Results.Setup.SetCaseSelectedForOutput('DEAD')\n #SapModel.Results.BaseReact(NumberResults, LoadCase, StepType, StepNum, Fx, Fy, Fz, Mx, My, Mz, gx, gy, gz)\n ret = SapModel.Results.BaseReact()\n if ret[0] != 0:\n print('ERROR getting base reaction forces')\n base_react = ret[7][0]\n total_weight = base_react / 9.81\n #convert to lb\n total_weight = total_weight / 0.45359237\n return max_acc, max_drift, total_weight\n\n\ndef get_FABI(max_acc, max_disp, footprint, weight):\n design_life = 100 #years\n construction_cost = 2500000*(weight**2)+6*(10**6)\n land_cost = 35000 * footprint\n annual_building_cost = (land_cost + construction_cost) / design_life\n floor_num = len(Tower.floor_heights)\n if floor_num <= 2:\n annual_revenue = 250 * floor_num\n elif floor_num <= 9:\n annual_revenue = 250 * 2 + 175 * (floor_num - 2)\n elif floor_num <= 15:\n annual_revenue = 250 * 2 + 175 * 7 + 225 * (floor_num - 9)\n else:\n annual_revenue = 250 * 2 + 175 * 7 + 225 * 6 + 275 * (floor_num - 15)\n #annual_revenue = 430300\n equipment_cost = 20000000\n return_period_1 = 50\n return_period_2 = 300\n apeak_1 = max_acc #g's\n xpeak_1 = 100*max_disp/1524 #% roof drift\n structural_damage_1 = scipy.stats.norm(1.5, 0.5).cdf(xpeak_1)\n equipment_damage_1 = scipy.stats.norm(1.75, 0.7).cdf(apeak_1)\n economic_loss_1 = structural_damage_1*construction_cost + equipment_damage_1*equipment_cost\n annual_economic_loss_1 = economic_loss_1/return_period_1\n structural_damage_2 = 0.5\n equipment_damage_2 = 0.5\n economic_loss_2 = structural_damage_2*construction_cost + equipment_damage_2*equipment_cost\n annual_economic_loss_2 = economic_loss_2/return_period_2\n annual_seismic_cost = annual_economic_loss_1 + annual_economic_loss_2\n fabi = annual_revenue - annual_building_cost - annual_seismic_cost\n return fabi\n\ndef write_to_excel(wb, all_fabi, save_loc):\n print('Writing all results to Excel...')\n filepath = save_loc + '/Results.xlsx'\n wb = openpyxl.Workbook()\n ws = wb.active\n ws['A1'] = 'Tower #'\n ws['B1'] = 'FABI'\n fabi_num = 1\n for fabi in all_fabi:\n ws['A' + str(fabi_num + 1)].value = fabi_num\n ws['B' + str(fabi_num + 1)].value = 
fabi\n fabi_num += 1\n wb.save(filepath)\n\n\n\n\n#----START-----------------------------------------------------START----------------------------------------------------#\n\n\n\nprint('\\n--------------------------------------------------------')\nprint('Autobuilder by University of Toronto Seismic Design Team')\nprint('--------------------------------------------------------\\n')\n\n#Read in the excel workbook\nprint(\"\\nReading Excel spreadsheet...\")\nwb = load_workbook('SetupAB.xlsm')\nExcelIndex = ReadExcel.get_excel_indices(wb, 'A', 'B', 2)\n\nSections = ReadExcel.get_properties(wb,ExcelIndex,'Section')\nMaterials = ReadExcel.get_properties(wb,ExcelIndex,'Material')\nBracing = ReadExcel.get_bracing(wb,ExcelIndex,'Bracing')\nFloorPlans = ReadExcel.get_floor_plans(wb,ExcelIndex)\nFloorBracing = ReadExcel.get_bracing(wb,ExcelIndex,'Floor Bracing')\nAllTowers = ReadExcel.read_input_table(wb, ExcelIndex)\nSaveLoc = ExcelIndex['Save location']\nTimeHistoryLoc = ExcelIndex['Time history location']\n\nprint('\\nInitializing SAP2000 model...')\n# create SAP2000 object\nSapObject = win32com.client.Dispatch('SAP2000v15.SapObject')\n# start SAP2000\nSapObject.ApplicationStart()\n# create SapModel Object\nSapModel = SapObject.SapModel\n# initialize model\nSapModel.InitializeNewModel()\n# create new blank model\nret = SapModel.File.NewBlank()\n\n#Define new materials\nprint(\"\\nDefining materials...\")\nN_m_C = 10\nSapModel.SetPresentUnits(N_m_C)\nfor Material, MatProps in Materials.items():\n MatName = MatProps['Name']\n MatType = MatProps['Material type']\n MatWeight = MatProps['Weight per volume']\n MatE = MatProps['Elastic modulus']\n MatPois = MatProps['Poisson\\'s ratio']\n MatTherm = MatProps['Thermal coefficient']\n #Create material type\n ret = SapModel.PropMaterial.SetMaterial(MatName, MatType)\n if ret != 0:\n print('ERROR creating material type')\n #Set isotropic material proprties\n ret = SapModel.PropMaterial.SetMPIsotropic(MatName, MatE, MatPois, MatTherm)\n if ret != 0:\n print('ERROR setting material properties')\n #Set unit weight\n ret = SapModel.PropMaterial.SetWeightAndMass(MatName, 1, MatWeight)\n if ret != 0:\n print('ERROR setting material unit weight')\n\n#Define new sections\nprint('Defining sections...')\nkip_in_F = 3\nSapModel.SetPresentUnits(kip_in_F)\nfor Section, SecProps in Sections.items():\n SecName = SecProps['Name']\n SecArea = SecProps['Area']\n SecTors = SecProps['Torsional constant']\n SecIn3 = SecProps['Moment of inertia about 3 axis']\n SecIn2 = SecProps['Moment of inertia about 2 axis']\n SecSh2 = SecProps['Shear area in 2 direction']\n SecSh3 = SecProps['Shear area in 3 direction']\n SecMod3 = SecProps['Section modulus about 3 axis']\n SecMod2 = SecProps['Section modulus about 2 axis']\n SecPlMod3 = SecProps['Plastic modulus about 3 axis']\n SecPlMod2 = SecProps['Plastic modulus about 2 axis']\n SecRadGy3 = SecProps['Radius of gyration about 3 axis']\n SecRadGy2 = SecProps['Radius of gyration about 2 axis']\n SecMat = SecProps['Material']\n #Create section property\n ret = SapModel.PropFrame.SetGeneral(SecName, SecMat, 0.1, 0.1, SecArea, SecSh2, SecSh3, SecTors, SecIn2, SecIn3, SecMod2, SecMod3, SecPlMod2, SecPlMod3, SecRadGy2, SecRadGy3, -1)\n if ret != 0:\n print('ERROR creating section property ' + SecName)\n\nAllFABI = []\nTowerNum = 1\nComputeTimes = []\n\n# Define load cases\nSapModel = define_loading(SapModel, TimeHistoryLoc, SaveLoc)\n# Start scatter plot of FABI\nxdata = []\nydata = []\naxes = plt.gca()\naxes.set_xlim(1, 
len(AllTowers))\naxes.set_ylim(bottom=0)\nScatterPlot, = axes.plot(xdata, ydata, 'ro')\nplt.grid(True, 'both', 'both')\nplt.xlabel('Tower Number')\nplt.ylabel('FABI')\nplt.show(block=False)\nplt.ion()\n\n\nStartTime = time.time()\n# Build all towers defined in spreadsheet\nfor Tower in AllTowers:\n print('\\nBuilding tower number ' + str(TowerNum))\n print('-------------------------')\n NumFloors = len(Tower.floor_plans)\n CurFloorNum = 1\n CurFloorElevation = 0\n # Build each floor of the tower\n\n while CurFloorNum <= NumFloors:\n print('Floor ' + str(CurFloorNum))\n if CurFloorNum <= NumFloors:\n SapModel = build_floor_plan_and_bracing(SapModel, Tower, FloorPlans, FloorBracing, CurFloorNum, CurFloorElevation)\n if CurFloorNum < NumFloors:\n SapModel = build_face_bracing(SapModel, Tower, FloorPlans, Bracing, CurFloorNum, CurFloorElevation)\n #INSERT FUNCTION TO CREATE COLUMNS AT CURRENT FLOOR\n\n CurFloorHeight = Tower.floor_heights[CurFloorNum - 1]\n CurFloorElevation = CurFloorElevation + CurFloorHeight\n CurFloorNum += 1\n # Set fixed end conditions on all ground floor nodes\n SapModel = set_base_restraints(SapModel)\n # Save the file\n SapModel.File.Save(SaveLoc + '/Tower ' + str(TowerNum))\n #Analyse tower and print results to spreadsheet\n print('\\nAnalyzing tower number ' + str(TowerNum))\n print('-------------------------')\n #run analysis and get weight and acceleration\n [MaxAcc, MaxDisp, Weight] = run_analysis(SapModel)\n #Calculate model FABI\n AllFABI.append(get_FABI(MaxAcc, MaxDisp, Tower.footprint, Weight))\n ##IS THIS FABI OR SEISMIC COST??\n #Print results to spreadsheet\n #Unlock model\n SapModel.SetModelIsLocked(False)\n # Delete everything in the model\n ret = SapModel.SelectObj.All(False)\n if ret != 0:\n print('ERROR selecting all')\n ret = SapModel.FrameObj.Delete(Name='', ItemType=2)\n if ret != 0:\n print('ERROR deleting all')\n # Determine total time taken to build current tower\n EndTime = time.time()\n TimeToComputeTower = EndTime - StartTime\n ComputeTimes.append(TimeToComputeTower)\n AverageComputeTime = sum(ComputeTimes) / len(ComputeTimes)\n ElapsedTime = sum(ComputeTimes)\n EstimatedTimeRemaining = (len(AllTowers) - TowerNum) * AverageComputeTime\n if EstimatedTimeRemaining <= 60:\n TimeUnitEstTime = 'seconds'\n elif EstimatedTimeRemaining > 60 and EstimatedTimeRemaining < 3600:\n TimeUnitEstTime = 'minutes'\n EstimatedTimeRemaining = EstimatedTimeRemaining / 60\n else:\n TimeUnitEstTime = 'hours'\n EstimatedTimeRemaining = EstimatedTimeRemaining / 3600\n\n if ElapsedTime <= 60:\n TimeUnitElaTime = 'seconds'\n elif ElapsedTime > 60 and ElapsedTime < 3600:\n TimeUnitElaTime = 'minutes'\n ElapsedTime = ElapsedTime / 60\n else:\n TimeUnitElaTime = 'hours'\n ElapsedTime = ElapsedTime / 3600\n #Round the times to the nearest 0.1\n AverageComputeTime = int(AverageComputeTime/1) + round(AverageComputeTime - int(AverageComputeTime/1),1)\n EstimatedTimeRemaining = int(EstimatedTimeRemaining/1) + round(EstimatedTimeRemaining - int(EstimatedTimeRemaining/1),1)\n ElapsedTime = int(ElapsedTime/1) + round(ElapsedTime - int(ElapsedTime/1),1)\n\n # Add FABI to scatter plot\n xdata.append(TowerNum)\n ydata.append(AllFABI[TowerNum-1])\n ScatterPlot.set_xdata(xdata)\n ScatterPlot.set_ydata(ydata)\n plt.xlim(0, TowerNum + 1)\n #plt.ylim(0, max(AllFABI) + max(AllFABI) / 4)\n plt.ylim(0, min(AllFABI) + min(AllFABI) / 4)\n plt.xticks(numpy.arange(min(xdata), max(xdata)+1, 1.0))\n plt.title('Average time per tower: ' + str(AverageComputeTime) + ' seconds\\n' + 'Estimated time 
remaining: ' + str(EstimatedTimeRemaining) + ' ' + TimeUnitEstTime + '\\nElapsed time so far: ' + str(ElapsedTime) + ' ' + TimeUnitElaTime)\n plt.draw()\n plt.pause(1e-6)\n plt.show(block=False)\n plt.ion()\n # Increment tower number\n TowerNum += 1\n\nprint('\\n\\nFinished constructing all towers.')\n\n# Write all results to excel spreadsheet\nwrite_to_excel(wb, AllFABI, SaveLoc)\n# Close SAP2000\nprint('Closing SAP2000...')\n#SapObject.ApplicationExit(False)\nprint('FINISHED.')\nplt.show(block=True)\n\n","repo_name":"shirleyzhang2/autobuilder","sub_path":"Autobuilder.py","file_name":"Autobuilder.py","file_ext":"py","file_size_in_byte":19482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
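A sketch of the pattern used by get_ID() in the Autobuilder record above, which closes over a counter and emits zero-padded serial IDs such as APT000000000001; the equivalent below uses itertools.count and str.zfill, and the names are illustrative only.

import itertools

def make_id_generator(prefix, start=0, width=12):
    # same increment-before-format behaviour as the closure in the record
    counter = itertools.count(start + 1)
    return lambda: prefix + str(next(counter)).zfill(width)

next_title_id = make_id_generator("APT")
print(next_title_id())   # APT000000000001
print(next_title_id())   # APT000000000002

The record seeds each generator from the current row count of its table, which the start argument above covers.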
+{"seq_id":"70889207208","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Album, Memory\nfrom .forms import AlbumForm, MemoryForm\n\n@login_required\ndef album_create(request):\n if request.method == 'POST':\n form = AlbumForm(request.POST)\n if form.is_valid():\n album = form.save(commit=False)\n album.owner = request.user\n album.save()\n return redirect('album_detail', album_id=album.pk)\n else:\n form = AlbumForm()\n return render(request, 'templates/album_create.html', {'form': form})\n\n@login_required\ndef album_detail(request, album_id):\n album = get_object_or_404(Album, pk=album_id)\n memories = Memory.objects.filter(album=album)\n return render(request, 'templates/album_detail.html', {'album': album, 'memories': memories})\n\n@login_required\ndef album_edit(request, album_id):\n album = get_object_or_404(Album, pk=album_id)\n if request.method == 'POST':\n form = AlbumForm(request.POST, instance=album)\n if form.is_valid():\n form.save()\n return redirect('album_detail', album_id=album.pk)\n else:\n form = AlbumForm(instance=album)\n return render(request, 'templates/album_edit.html', {'form': form, 'album': album})\n\n@login_required\ndef memory_upload(request, album_id):\n album = get_object_or_404(Album, pk=album_id)\n if request.method == 'POST':\n form = MemoryForm(request.POST, request.FILES)\n if form.is_valid():\n memory = form.save(commit=False)\n memory.album = album\n memory.save()\n return redirect('album_detail', album_id=album.pk)\n else:\n form = MemoryForm()\n return render(request, 'templates/memory_upload.html', {'form': form, 'album': album})\n\n","repo_name":"DT-GAMER/MemoriesGallery","sub_path":"assignment_4/assignment_4/albums/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"40356780124","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nfrom scrapy import log\nimport pymysql\nimport re\nfrom pprint import pprint\nfrom datetime import datetime\nimport os\n\nclass SiePipeline(object):\n cursor =None\n connection = None\n SerialID =None\n EvalID =None\n VerID =None\n first_open =False # パイプラインが初回起動時か同課のフラグ\n last_id = {}\n dbname = \"appstore\"\n\n filename =os.getcwd()+ \"\\\\\" + \"logfile.txt\"\n filestream = None\n\n tables_id ={\n \"APT\":\"titledb\",\n \"APV\":\"versiondb\",\n \"APE\":\"evaluationdb\",\n }\n\n use_tables ={\n \"title\":\"titledb\",\n \"ver\":\"versiondb\",\n \"eval\":\"evaluationdb\",\n }\n table_id_1 = \"title\"\n title_column =[]\n version_column = []\n evaluation_column = []\n\n test_column =[\n \"title\",\n \"link\"\n ]\n\n\n def __init__(self):\n #logfile設定\n self.filestream = open(self.filename,\"w\") \n #SQL接続\n self.connection = pymysql.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"root\",\n db=\"appstore\",\n charset =\"utf8\",\n autocommit=True\n )\n\n #ID設定と件数取得 後で削除\n self.cursor=self.connection.cursor()\n for ID_type in self.tables_id:\n sql = 'SELECT COUNT(*) FROM %s;' % self.tables_id[ID_type]\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n start_number = result[0][0]\n if ID_type == \"APT\":\n self.SerialID = self.get_ID(start_number,\"APT\")\n elif ID_type == \"APV\":\n self.VerID = self.get_ID(start_number,\"APV\")\n elif ID_type == \"APE\":\n self.EvalID = self.get_ID(start_number,\"APE\")\n \n #カラムリストの取得と保持\n for table_name in self.use_tables.values():\n sql = 'SELECT column_name FROM information_schema.columns WHERE table_name = \"%s\" AND table_schema = \"%s\";'%(table_name,self.dbname)\n log.msg(sql,level=log.DEBUG)\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n for column_name in result:\n if table_name ==\"titledb\": \n self.title_column.append(column_name[0])\n #log.msg(pprint(result),level=log.DEBUG)\n #log.msg(pprint(self.title_column),level=log.DEBUG)\n elif table_name == \"versiondb\":\n self.version_column.append(column_name[0])\n #log.msg(pprint(result),level=log.DEBUG)\n #log.msg(pprint(self.version_column),level=log.DEBUG)\n elif table_name ==\"evaluationdb\":\n self.evaluation_column.append(column_name[0])\n #log.msg(pprint(result),level=log.DEBUG)\n #log.msg(pprint(self.evaluation_column),level=log.DEBUG)\n\n def process_item(self, item, spider):\n item[\"serialID\"]=self.SerialID()\n item[\"serialVID\"]=self.VerID()\n item[\"serialEID\"]=self.EvalID()\n self.insert_data_re(item,\"title\")\n log.msg(pprint(self.evaluation_column),level=log.DEBUG)\n self.insert_data_re(item,\"eval\")\n\n\n #title = str(item[\"title\"])\n #title = re.sub(r\"[~\\[\\']\", \"\", title)\n #title = re.sub(r\"[\\'\\]$]\", \"\", title)\n #link = str(item[\"link\"])\n #link = re.sub(r\"[~\\[\\']\", \"\", link)\n #link = re.sub(r\"[\\'\\]$]\", \"\", link)\n\n #sql = ('INSERT INTO %s (title, url) VALUES (\"%s\",\"%s\");' % (use_table,title,link)).encode(\"utf8\")\n\n\n #self.cursor.execute(sql)\n \n return item\n\n\n def update_data_re(self,item,type,*where):\n tablename = None\n columlist = None\n #テーブルにより処理を分岐\n if type == \"title\":\n tablename = self.use_tables[\"title\"]\n columlist = self.title_column \n elif type == \"ver\":\n tablename = self.use_tables[\"ver\"]\n columlist = self.version_column\n elif type == 
\"eval\":\n tablename = self.use_tables[\"eval\"]\n columlist = self.evaluation_column\n elif type == \"test\":\n tablename = self.use_tables[\"test\"]\n columlist = self.test_clumn\n else:\n return false\n #SQL文生成\n sql ='UPDATE ' \n sql += tablename\n \n sql += \" SET \"\n for num in range(0,len(columlist)):\n if columlist[num] == \"SerialID\": continue #更新処理の場合 シリアルIDの処理をスキップ\n #型判定分岐処理\n temp =None\n sql += columlist[num] + \"=\"\n if isinstance(item[columlist[num]],str): #文字列\n temp = \"'\" + item[columlist[num]] + \"'\"\n else :\n temp = str(item[columlist[num]]) #数字\n sql += temp\n \n if num != len(columlist)-1: sql += \", \"\n \n \n #クエリ挿入処理 \n sql += \" \"\n\n sql += 'WHERE '\n for num in range(0,len(where)):\n if isinstance(item[where[num]],str): #文字列\n temp = \"'\" + item[where[num]] + \"'\"\n else :\n temp = str(item[where[num]]) #数字 \n sql += where[num] + \"=\" + temp \n\n if num != len(where)-1: sql += \" and \" \n sql += \";\"\n log.msg(\" update\" + sql,level = log.DEBUG)\n try:\n resultNumber=self.cursor.execute(sql)\n #if (resultNumber != 2):\n #raise Exception(\"error mysql\") \n except:\n \n self.filestream.write(pprint(item))\n \n\n def insert_data_re(self,item,type):\n tablename = None\n columlist = None\n #テーブルにより処理を分岐\n if type == \"title\":\n tablename = self.use_tables[\"title\"]\n columlist = self.title_column \n elif type == \"ver\":\n tablename = self.use_tables[\"ver\"]\n columlist = self.version_column\n elif type == \"eval\":\n tablename = self.use_tables[\"eval\"]\n columlist = self.evaluation_column\n elif type == \"test\":\n tablename = self.use_tables[\"test\"]\n columlist = self.test_clumn\n else:\n return false\n #SQL文生成\n sql ='INSERT INTO ' \n sql += tablename\n sql += \" (\"\n \n for num in range(0,len(columlist)):\n \n if num == len(columlist)-1: sql += columlist[num]\n else: sql += columlist[num] + \", \"\n sql += \")\"\n sql += \" VALUES (\"\n for num in range(0,len(columlist)):\n #型判定分岐処理\n temp =None \n if isinstance(item[columlist[num]],str):\n temp = \"'\" + item[columlist[num]] + \"'\"\n else :\n temp = str(item[columlist[num]])\n #クエリ挿入処理 \n if num == len(columlist)-1:sql += temp \n else: sql += temp + \", \"\n sql += \");\"\n\n log.msg(\"insert\" + sql,level = log.DEBUG)\n try:\n resultNumber=self.cursor.execute(sql)\n #if (resultNumber != 1):\n #raise Exception(\"error mysql\")\n except:\n self.filestream.write(pprint(sql)+\"\\n\")\n \n def get_ID(self,last_number,IDname=\"AP\"):#一つ前の件数を取得し代入\n num =last_number\n ID_type = IDname\n \n def closure():\n nonlocal num\n nonlocal ID_type\n\n max_size = 12 #桁の最大数 \n num +=1 #インクリメント\n\n strnum = str(num)#文字列として保存\n for var in range(0,max_size - len(strnum)): #最大桁数と現在の数字の桁数の差分の回数繰り返し、0で埋める\n strnum = \"0\" + strnum\n\n ID = ID_type+strnum\n return ID\n\n\n return closure","repo_name":"rcgs/appstoreCrawler","sub_path":"SIE/SIE/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10146124445","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Intro\n# This is my first kaggle competition and I have really enjoyed it so far.\n# The following kernel has been inspired by:\n# + https://www.kaggle.com/ogrellier/plasticc-in-a-kernel-meta-and-data\n# + https://www.kaggle.com/c/PLAsTiCC-2018/discussion/70908\n# + https://www.kaggle.com/meaninglesslives/simple-neural-net-for-time-series-classification\n# \n# Still haven't figured out the feature hidden in here https://www.kaggle.com/c/PLAsTiCC-2018/discussion/70725#416740\n# \n# A big thanks to the kaggle community that makes this competition so enjoyable.\n\n# # Loading Libraries\n# \n# \n# \n\n# In[ ]:\n\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom sklearn.metrics import log_loss\nfrom sklearn.model_selection import StratifiedKFold\nimport gc\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns \nimport lightgbm as lgb\nfrom catboost import Pool, CatBoostClassifier\nimport itertools\nimport pickle, gzip\nimport glob\nfrom sklearn.preprocessing import StandardScaler\nfrom tsfresh.feature_extraction import extract_features\nnp.warnings.filterwarnings('ignore')\n\n\n# # Extracting Features from train set\n# + Features from olivier's kernel \n# + very smart and powerful feature that is generously given here https://www.kaggle.com/c/PLAsTiCC-2018/discussion/69696#410538\n# + per passband features with tsfresh library. fft features added to capture periodicity https://www.kaggle.com/c/PLAsTiCC-2018/discussion/70346#415506\n\n# In[ ]:\n\n\ngc.enable()\n\ntrain = pd.read_csv('../input/training_set.csv')\n# Features to compute with tsfresh library. Fft coefficient is meant to capture periodicity\nfcp = {'fft_coefficient': [{'coeff': 0, 'attr': 'abs'},{'coeff': 1, 'attr': 'abs'}],'kurtosis' : None, 'skewness' : None}\n\ndef featurize(df):\n df['flux_ratio_sq'] = np.power(df['flux'] / df['flux_err'], 2.0)\n df['flux_by_flux_ratio_sq'] = df['flux'] * df['flux_ratio_sq']\n # train[detected==1, mjd_diff:=max(mjd)-min(mjd), by=object_id]\n\n\n aggs = {\n 'flux': ['min', 'max', 'mean', 'median', 'std','skew'],\n 'flux_err': ['min', 'max', 'mean', 'median', 'std','skew'],\n 'detected': ['mean'],\n 'flux_ratio_sq':['sum','skew'],\n 'flux_by_flux_ratio_sq':['sum','skew'],\n }\n\n agg_df = df.groupby('object_id').agg(aggs)\n new_columns = [\n k + '_' + agg for k in aggs.keys() for agg in aggs[k]\n ]\n agg_df.columns = new_columns\n agg_df['flux_diff'] = agg_df['flux_max'] - agg_df['flux_min']\n agg_df['flux_dif2'] = (agg_df['flux_max'] - agg_df['flux_min']) / agg_df['flux_mean']\n agg_df['flux_w_mean'] = agg_df['flux_by_flux_ratio_sq_sum'] / agg_df['flux_ratio_sq_sum']\n agg_df['flux_dif3'] = (agg_df['flux_max'] - agg_df['flux_min']) / agg_df['flux_w_mean']\n # Add more features with \n agg_df_ts = extract_features(df, column_id='object_id', column_sort='mjd', column_kind='passband', column_value = 'flux', default_fc_parameters = fcp, n_jobs=4)\n # Add smart feature that is suggested here https://www.kaggle.com/c/PLAsTiCC-2018/discussion/69696#410538\n # dt[detected==1, mjd_diff:=max(mjd)-min(mjd), by=object_id]\n df_det = df[df['detected']==1].copy()\n\n agg_df_mjd = extract_features(df_det, column_id='object_id', column_value = 'mjd', default_fc_parameters = {'maximum':None, 'minimum':None}, n_jobs=4)\n agg_df_mjd['mjd_diff_det'] = agg_df_mjd['mjd__maximum'] - agg_df_mjd['mjd__minimum']\n del agg_df_mjd['mjd__maximum'], 
agg_df_mjd['mjd__minimum']\n agg_df_ts = pd.merge(agg_df_ts, agg_df_mjd, on = 'id')\n # tsfresh returns a dataframe with an index name='id'\n agg_df_ts.index.rename('object_id',inplace=True)\n agg_df = pd.merge(agg_df, agg_df_ts, on='object_id')\n return agg_df\n\nagg_train = featurize(train)\n\n# # Merging extracted features with meta data\n\n# In[ ]:\n\n\nmeta_train = pd.read_csv('../input/training_set_metadata.csv')\nmeta_train.head()\n\nfull_train = agg_train.reset_index().merge(\n right=meta_train,\n how='outer',\n on='object_id'\n)\n\nif 'target' in full_train:\n y = full_train['target']\n del full_train['target']\nclasses = sorted(y.unique())\n\n# Taken from Giba's topic : https://www.kaggle.com/titericz\n# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194\n# with Kyle Boone's post https://www.kaggle.com/kyleboone\nclass_weight = {\n c: 1 for c in classes\n}\nfor c in [64, 15]:\n class_weight[c] = 2\n\nprint('Unique classes : ', classes)\n\nif 'object_id' in full_train:\n oof_df = full_train[['object_id']]\n del full_train['object_id'], full_train['distmod'], full_train['hostgal_specz']\n del full_train['ra'], full_train['decl'], full_train['gal_l'],full_train['gal_b'],full_train['ddf']\n \ntrain_mean = full_train.mean(axis=0)\nfull_train.fillna(0, inplace=True)\n\n# # Class weights and loss functions\n\n# In[ ]:\n\n\n# Compute weights\nw = y.value_counts()\nweights = {i : np.sum(w) / w[i] for i in w.index}\n\ndef multi_weighted_logloss(y_true, y_preds):\n \"\"\"\n @author olivier https://www.kaggle.com/ogrellier\n multi logloss for PLAsTiCC challenge\n \"\"\"\n # class_weights taken from Giba's topic : https://www.kaggle.com/titericz\n # https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194\n # with Kyle Boone's post https://www.kaggle.com/kyleboone\n classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]\n class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}\n if len(np.unique(y_true)) > 14:\n classes.append(99)\n class_weight[99] = 2\n y_p = y_preds\n # Trasform y_true in dummies\n y_ohe = pd.get_dummies(y_true)\n # Normalize rows and limit y_preds to 1e-15, 1-1e-15\n y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)\n # Transform to log\n y_p_log = np.log(y_p)\n # Get the log for ones, .values is used to drop the index of DataFrames\n # Exclude class 99 for now, since there is no class99 in the training set\n # we gave a special process for that class\n y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)\n # Get the number of positives for each class\n nb_pos = y_ohe.sum(axis=0).values.astype(float)\n # Weight average and divide by the number of positives\n class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])\n y_w = y_log_ones * class_arr / nb_pos\n\n loss = - np.sum(y_w) / np.sum(class_arr)\n return loss\n\n\ndef lgb_multi_weighted_logloss(y_true, y_preds):\n \"\"\"\n @author olivier https://www.kaggle.com/ogrellier\n multi logloss for PLAsTiCC challenge\n \"\"\"\n # class_weights taken from Giba's topic : https://www.kaggle.com/titericz\n # https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194\n # with Kyle Boone's post https://www.kaggle.com/kyleboone\n classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]\n class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}\n if len(np.unique(y_true)) > 14:\n classes.append(99)\n class_weight[99] = 2\n y_p = y_preds.reshape(y_true.shape[0], len(classes), 
order='F')\n\n # Trasform y_true in dummies\n y_ohe = pd.get_dummies(y_true)\n # Normalize rows and limit y_preds to 1e-15, 1-1e-15\n y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)\n # Transform to log\n y_p_log = np.log(y_p)\n # Get the log for ones, .values is used to drop the index of DataFrames\n # Exclude class 99 for now, since there is no class99 in the training set\n # we gave a special process for that class\n y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)\n # Get the number of positives for each class\n nb_pos = y_ohe.sum(axis=0).values.astype(float)\n # Weight average and divide by the number of positives\n class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])\n y_w = y_log_ones * class_arr / nb_pos\n\n loss = - np.sum(y_w) / np.sum(class_arr)\n return 'wloss', loss, False\n\n\ndef save_importances(importances_):\n mean_gain = importances_[['gain', 'feature']].groupby('feature').mean()\n importances_['mean_gain'] = importances_['feature'].map(mean_gain['gain'])\n plt.figure(figsize=(8, 12))\n sns.barplot(x='gain', y='feature', data=importances_.sort_values('mean_gain', ascending=False))\n plt.tight_layout()\n plt.savefig('importances.png')\n\n# # 5-fold lgb training\n\n# In[ ]:\n\n\n\nfolds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\nclfs = []\nimportances = pd.DataFrame()\nlgb_params = {\n 'boosting_type': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 14,\n 'metric': 'multi_logloss',\n 'learning_rate': 0.03,\n 'subsample': .9,\n 'colsample_bytree': 0.5,\n 'reg_alpha': .01,\n 'reg_lambda': .01,\n 'min_split_gain': 0.01,\n 'min_child_weight': 10,\n 'n_estimators': 1000,\n 'silent': -1,\n 'verbose': -1,\n 'max_depth': 3\n}\n\n# Compute weights\nw = y.value_counts()\nweights = {i : np.sum(w) / w[i] for i in w.index}\n\noof_preds = np.zeros((len(full_train), np.unique(y).shape[0]))\nfor fold_, (trn_, val_) in enumerate(folds.split(y, y)):\n trn_x, trn_y = full_train.iloc[trn_], y.iloc[trn_]\n val_x, val_y = full_train.iloc[val_], y.iloc[val_]\n\n clf = lgb.LGBMClassifier(**lgb_params)\n clf.fit(\n trn_x, trn_y,\n eval_set=[(trn_x, trn_y), (val_x, val_y)],\n eval_metric=lgb_multi_weighted_logloss,\n verbose=100,\n early_stopping_rounds=50,\n sample_weight=trn_y.map(weights)\n )\n oof_preds[val_, :] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)\n print(multi_weighted_logloss(val_y, oof_preds[val_, :]))\n\n imp_df = pd.DataFrame()\n imp_df['feature'] = full_train.columns\n imp_df['gain'] = clf.feature_importances_\n imp_df['fold'] = fold_ + 1\n importances = pd.concat([importances, imp_df], axis=0, sort=False)\n\n clfs.append(clf)\n\nprint('MULTI WEIGHTED LOG LOSS : %.5f ' % multi_weighted_logloss(y_true=y, y_preds=oof_preds))\n\n# # Evaluation\n# features' importance and confusion matrix\n\n# In[ ]:\n\n\nsave_importances(importances_=importances)\n# http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, 
rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n \nunique_y = np.unique(y)\nclass_map = dict()\nfor i,val in enumerate(unique_y):\n class_map[val] = i\n \ny_map = np.zeros((y.shape[0],))\ny_map = np.array([class_map[val] for val in y])\n\n# Compute confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncnf_matrix = confusion_matrix(y_map, np.argmax(oof_preds,axis=-1))\nnp.set_printoptions(precision=2)\n\nsample_sub = pd.read_csv('../input/sample_submission.csv')\nclass_names = list(sample_sub.columns[1:-1])\ndel sample_sub;gc.collect()\n\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(12,12))\nfoo = plot_confusion_matrix(cnf_matrix, classes=class_names,normalize=True,\n title='Confusion matrix')\n\n# # Test Set Predictions\n\n# In[ ]:\n\n\ndef predict_chunk(df_, clfs_, meta_, features, train_mean):\n # Group by object id \n agg_ = featurize(df_)\n # Merge with meta data\n full_test = agg_.reset_index().merge(\n right=meta_,\n how='left',\n on='object_id'\n )\n\n full_test = full_test.fillna(0)\n # Make predictions\n preds_ = None\n for clf in clfs_:\n if preds_ is None:\n preds_ = clf.predict_proba(full_test[features]) / len(clfs_)\n else:\n preds_ += clf.predict_proba(full_test[features]) / len(clfs_)\n\n # Compute preds_99 as the proba of class not being any of the others\n # preds_99 = 0.1 gives 1.769\n preds_99 = np.ones(preds_.shape[0])\n for i in range(preds_.shape[1]):\n preds_99 *= (1 - preds_[:, i])\n\n # Create DataFrame from predictions\n preds_df_ = pd.DataFrame(preds_, columns=['class_' + str(s) for s in clfs_[0].classes_])\n preds_df_['object_id'] = full_test['object_id']\n preds_df_['class_99'] = 0.14 * preds_99 / np.mean(preds_99) \n return preds_df_\n\n# In[ ]:\n\n\nmeta_test = pd.read_csv('../input/test_set_metadata.csv')\n# meta_test.set_index('object_id',inplace=True)\nimport time\n\nstart = time.time()\nchunks = 5000000\nremain_df = None\n\nfor i_c, df in enumerate(pd.read_csv('../input/test_set.csv', chunksize=chunks, iterator=True)):\n # Check object_ids\n # I believe np.unique keeps the order of group_ids as they appear in the file\n unique_ids = np.unique(df['object_id'])\n new_remain_df = df.loc[df['object_id'] == unique_ids[-1]].copy()\n if remain_df is None:\n df = df.loc[df['object_id'].isin(unique_ids[:-1])]\n else:\n df = pd.concat([remain_df, df.loc[df['object_id'].isin(unique_ids[:-1])]], axis=0)\n # Create remaining samples df\n remain_df = new_remain_df\n preds_df = predict_chunk(df_=df,\n clfs_=clfs,\n meta_=meta_test,\n features=full_train.columns,\n train_mean=train_mean)\n\n if i_c == 0:\n preds_df.to_csv('predictions.csv', header=True, mode='a', index=False)\n else:\n preds_df.to_csv('predictions.csv', header=False, mode='a', index=False)\n\n del preds_df\n gc.collect()\n \n print('%15d done in %5.1f minutes' % (chunks * (i_c + 1), (time.time() - start) / 60), flush=True)\n\n# Compute last object in remain_df\npreds_df = predict_chunk(df_=remain_df,\n clfs_=clfs,\n meta_=meta_test,\n features=full_train.columns,\n train_mean=train_mean)\n\npreds_df.to_csv('predictions.csv', header=False, mode='a', index=False)\n\n# In[ ]:\n\n\nz = pd.read_csv('predictions.csv')\nprint(\"Shape BEFORE 
grouping:\",z.shape)\nz = z.groupby('object_id').mean()\nprint(\"Shape AFTER grouping:\",z.shape)\nz.to_csv('single_predictions.csv', index=True)\n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample136.py","file_name":"sample136.py","file_ext":"py","file_size_in_byte":14943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"27719326964","text":"from django.urls import path\nfrom django.conf.urls import include, url\nfrom rest_framework import routers\nfrom rest_framework_swagger.views import get_swagger_view\nfrom api import views\n\nrouter = routers.DefaultRouter(trailing_slash=False)\n\nrouter.register(\n prefix='feed', \n viewset=views.FeedItemViewSet, \n basename='feed'\n)\n\nrouter.register(\n prefix='feed/(?P[^/.]+)/comments',\n viewset=views.FeedItemCommentViewSet,\n basename='feed_comments'\n)\n\nrouter.register(\n prefix='feed/(?P[^/.]+)/images',\n viewset=views.ImageViewSet,\n basename='feed_images'\n)\n\nrouter.register(\n prefix='images', \n viewset=views.ImageViewSet, \n basename='image'\n)\n\nschema_view = get_swagger_view(title='Django API')\n\nurlpatterns = router.urls + [url('docs', schema_view)]","repo_name":"mark-randall/Django_API_on_GCP","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19816324852","text":"# -*- coding: utf-8 -*-\nimport reportlab\nimport datetime\nfrom cStringIO import StringIO\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.lib import colors, enums\nfrom reportlab.lib.units import cm, mm\nfrom django.conf import settings\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\n\nfrom reportlab.platypus import *\n\nDEBUG = 0\n\nSELLER = u\"\"\"\n Seller info here.\n\"\"\"\n\nclass InvoicePageTemplate(PageTemplate):\n def __init__(self, id, pagesize=A4):\n self.pageWidth = pagesize[0]\n self.pageHeight = pagesize[1]\n frames = []\n frames.append(Frame(x1=0*mm, y1=285*mm, width=210*mm, height=12*mm, showBoundary=DEBUG))\n frames.append(Frame(x1=15*mm, y1=245*mm, width=100*mm, height=4*cm, showBoundary=DEBUG))\n frames.append(Frame(x1=15*mm, y1=205*mm, width=100*mm, height=4*cm, showBoundary=DEBUG))\n frames.append(Frame(x1=115*mm, y1=245*mm, width=8*cm, height=4*cm, showBoundary=DEBUG))\n frames.append(Frame(x1=115*mm, y1=205*mm, width=8*cm, height=4*cm, showBoundary=DEBUG))\n frames.append(Frame(x1=15*mm, y1=20*mm, width=180*mm, height=185*mm, showBoundary=DEBUG))\n PageTemplate.__init__(self, id, frames)\n\n def beforeDrawPage(self, canv, doc):\n canv.drawImage(settings.PROJECT_PATH+'/fvat_logo.jpg', 15*mm, 12*cm, 18*cm, 71*mm)\n\nclass Invoice():\n def __init__(self, order, rodzaj='original'):\n self.order = order\n r = {'original':u\"Oryginał\", 'copy':u\"Kopia\"}\n self.INVOICE_INFO = u\"\"\"Faktura VAT %(number)s %(rodzaj)s\n Data wystawienia: %(invoice_time)s, Kozy\n Data sprzedaży: %(order_created)s\n \"\"\" % {\n 'rodzaj': r[rodzaj],\n 'number': self.order.invoice_number,\n 'invoice_time': self.order.created.strftime(\"%d-%m-%Y\"),\n 'order_created': self.order.created.strftime(\"%d-%m-%Y\"),\n }\n\n pdfmetrics.registerFont(TTFont('Arial-Bold', settings.PROJECT_PATH+'/fonts/arialbd.ttf'))\n pdfmetrics.registerFont(TTFont('Arial', settings.PROJECT_PATH+'/fonts/arial.ttf'))\n from orders.pdf import styles\n self.styles = {\n 'normal': styles.normal,\n 'normal_bold':styles.bold,\n 'head':styles.head,\n 'bold_center':styles.bold_center,\n 'right':styles.right,\n 'center':styles.center,\n }\n\n def _get_address(self, type='billing'):\n \"\"\"\n Render addresses on the invoice\n \"\"\"\n elements = []\n if type == 'billing':\n address = self.order.user.get_profile().billing_address()\n else:\n address = self.order.user.get_profile().shipping_address()\n #elements.append(Spacer(width=2*cm, height=2*cm))\n for line in address.display_pdf.splitlines():\n elements.append(Paragraph(line, self.styles['normal_bold']))\n return elements\n\n def _get_items_table(self):\n data = []\n ts = [\n ('ALIGN', (1,1), (-1,-1), 'CENTER'),\n ('GRID',(0,0),(-1,-1), 0.5, colors.grey),\n ('VALIGN',(0,0),(-1,-1), 'MIDDLE'),\n #('SPAN',(0,-3),(3,-3)),\n #('SPAN',(0,-2),(3,-2)),\n #('SPAN',(0,-1),(3,-1)),\n ]\n head = [\n 'Lp.',\n 'Nazwa towaru',\n 'Ilość',\n 'Cena j. 
netto [zł]',\n 'Wartość netto [zł]',\n 'VAT [%]',\n 'kwota VAT [zł]',\n 'Wartość brutto [zł]'\n ]\n head_p = []\n for h in head:\n head_p.append(Paragraph(h, self.styles['bold_center']))\n data.append(head_p)\n for i, item in enumerate(self.order.ordereditem_set.all()):\n row = [\n Paragraph(str(i+1)+\".\", self.styles['normal']), \n Paragraph(item.description.replace(\"&\",\" \"), self.styles['normal']),\n Paragraph(str(item.quantity), self.styles['normal']),\n Paragraph(\"%.2f\" % item.price_nett(), self.styles['normal']),\n Paragraph(\"%.2f\" % item.price_total_nett(), self.styles['normal']),\n Paragraph('22', self.styles['normal']),\n Paragraph(\"%.2f\" % item.price_tax(), self.styles['normal']),\n Paragraph(\"%.2f\" % item.get_price_total(), self.styles['normal']),\n ]\n data.append(row)\n if self.order.shipping_price > 0:\n shipping = [\n Paragraph(str(self.order.ordereditem_set.count()+1), self.styles['normal']),\n Paragraph('Koszt dostawy', self.styles['normal']),\n Paragraph('1', self.styles['normal']),\n Paragraph(\"%.2f\" % self.order.shipping_price_nett(), self.styles['normal']),\n Paragraph(\"%.2f\" % self.order.shipping_price_nett(), self.styles['normal']),\n Paragraph('22', self.styles['normal']),\n Paragraph(\"%.2f\" % self.order.shipping_price_tax(), self.styles['normal']),\n Paragraph(\"%.2f\" % self.order.shipping_price, self.styles['normal']),\n ]\n data.append(shipping)\n return Table(data, colWidths=[1*cm, 7*cm, 1*cm, 2*cm, 2*cm, 1*cm, 2*cm, 2*cm], style=ts)\n\n def _table_footer(self):\n ts = [\n ('ALIGN', (1,1), (-1,-1), 'CENTER'),\n ('GRID',(4,0),(-1,-1), 0.5, colors.grey),\n ('LINEABOVE',(0,0),(-1,0), 0, colors.grey),\n ('VALIGN',(0,0),(-1,-1), 'MIDDLE'),\n #('SPAN',(0,0),(0,-1)),\n ('SPAN',(0,0),(3,0)),\n ('SPAN',(0,1),(3,1)),\n ]\n footer = [\n [\n Paragraph('Rabat:', self.styles['right']),\n '', '', '',\n Paragraph(\"%.2f\" % self.order.discount_nett(), self.styles['normal']),\n Paragraph('22', self.styles['normal']),\n Paragraph(\"%.2f\" % self.order.discount_tax(), self.styles['normal']),\n Paragraph(\"%.2f\" % float(self.order.discount), self.styles['normal']),\n ],\n [\n Paragraph('Razem:', self.styles['right']),\n '', '', '',\n Paragraph(\"%.2f\" % self.order.discounted_price_nett(), self.styles['normal']),\n Paragraph('22', self.styles['normal']),\n Paragraph(\"%.2f\" % self.order.discounted_price_tax(), self.styles['normal']),\n Paragraph(\"%.2f\" % self.order.discounted_price(), self.styles['normal']),\n ],\n ]\n return Table(footer, colWidths=[1*cm, 7*cm, 1*cm, 2*cm, 2*cm, 1*cm, 2*cm, 2*cm], style=ts)\n\n def _head(self):\n elements = []\n # sprzedawca\n elements.append(FrameBreak())\n elements.append(Paragraph(\"Sprzedawca:\", self.styles['normal']))\n for line in SELLER.splitlines():\n p = Paragraph(line, self.styles['normal_bold'])\n elements.append(p)\n # adres billingowy\n elements.append(FrameBreak())\n elements.append(Paragraph(\"Nabywca:\", self.styles['normal']))\n elements += self._get_address('billing')\n # dane fvat\n elements.append(FrameBreak())\n for line in self.INVOICE_INFO.splitlines():\n p = Paragraph(''+line+'', self.styles['normal_bold'])\n elements.append(p)\n # adres shipping\n elements.append(FrameBreak())\n elements.append(Paragraph(\"Adresat:\", self.styles['normal']))\n elements += self._get_address('shipping')\n elements.append(FrameBreak())\n elements.append(Paragraph(u\"Zamówienie: \"+self.order.name, self.styles['head']))\n return elements\n\n def _signatures(self):\n dots = \".\"\n for d in range(0,80):\n dots += 
\".\"\n\n ts = [\n ('ALIGN', (0,0),(-1,-1), 'CENTER'),\n #('GRID',(0,0),(-1,-1), 0, colors.grey),\n ]\n\n data = [\n [ Paragraph(dots, self.styles['center']),Paragraph(dots, self.styles['center'])],\n [ Paragraph(u\"Osoba upoważniona do odbioru\", self.styles['center']),Paragraph(u\"Osoba upoważniona do wystawienia\", self.styles['center'])],\n ]\n return Table(data, colWidths=[9*cm, 9*cm], style=ts)\n\n def pdf(self):\n buffer = StringIO()\n elements = []\n\n # page templates\n pagetemplates = []\n pagetemplates.append(InvoicePageTemplate(id='FirstPage'))\n document = BaseDocTemplate(buffer, pagesize=A4, pageTemplates=pagetemplates)\n # head\n elements += self._head()\n elements.append(self._get_items_table())\n elements.append(self._table_footer())\n #elements.append(Spacer(width=2*cm, height=5*mm))\n elements.append(Paragraph(u\"Razem do zapłaty: \"+str(self.order.discounted_price())+u\" zł\", self.styles['head']))\n #elements.append(Spacer(width=2*cm, height=10*mm))\n elements.append(self._signatures())\n document.build(elements)\n result = buffer.getvalue()\n buffer.close()\n return result\n","repo_name":"dekoza/django-minishop","sub_path":"orders/invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":9139,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"12348927898","text":"from flask import Flask, render_template, request\nfrom flask import send_file\nfrom sqlalchemy import desc\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nimport io, os\nfrom datetime import datetime\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nfrom jinja2 import FileSystemLoader\n\nimport importlib\n\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\n# templatesディレクトリ以外からもマクロを指定したいので、jinja2が検索するパスを追加\napp.jinja_loader = FileSystemLoader([\"./templates\", \"./competitions\"])\n\n@app.route(\"/\")\ndef top_page():\n # 暫定トップページ。competitions以下にある物全てにリンクを用意\n s = \"\".join([\"competiton: {0}
\".format(i) for i in os.listdir(\"./competitions\")])\n return s\n\n@app.route(\"//\")\n@app.route(\"//overview\")\ndef overview_page(compe):\n return render_template(\"overview.html\", macro_src=\"./\" + compe + \"/macro.html\")\n\n@app.route(\"//data\")\ndef data_page(compe):\n return render_template(\"data.html\", macro_src=\"./\" + compe + \"/macro.html\")\n\n@app.route(\"//leaderboard\")\ndef leaderboard_page(compe):\n ScoreCalculator = importlib.import_module(\"competitions.\" + compe + \".ScoreCalculator\")\n sc = ScoreCalculator.ScoreCalculator(\"./competitions/\" + compe + \"/true_answer.pkl\")\n db = load_db(compe, sc.main_score, sc.disp_score, sc.ascending)\n return render_template(\"leaderboard.html\", tables=db, compe=compe)\n\n@app.route(\"//submit\")\ndef submit_page(compe):\n return render_template(\"submit.html\", compe=compe)\n\n\n@app.route(\"//mysubmission\")\ndef mysub_page(compe):\n ScoreCalculator = importlib.import_module(\"competitions.\" + compe + \".ScoreCalculator\")\n sc = ScoreCalculator.ScoreCalculator(\"./competitions/\" + compe + \"/true_answer.pkl\")\n db = load_db(compe, sc.main_score, sc.disp_score, sc.ascending)\n return render_template(\"mysubmission.html\", tables=db, compe=compe)\n\n\n@app.route(\"//submitresult\", methods=['POST'])\ndef submitresult(compe):\n # 例外の読み込み\n ScoreCalculator = importlib.import_module(\"competitions.\" + compe + \".ScoreCalculator\")\n \n submit_title = request.form[\"submit_name\"]\n user_name = request.form[\"user_name\"]\n filestream = request.files[\"upload_file\"]\n try:\n file_content = decode_file(filestream)\n df_submit = convert_dataframe(file_content)\n # calculate score\n sc, scores = get_scores(df_submit, compe)\n except (ValueError, UnicodeDecodeError):\n return \"submited file failed to convert data frame. please check. 
back\"\n except ScoreCalculator.FileCheckError as e:\n return e.message + \"\\n back\"\n\n engine = create_engine(\"sqlite:///competitions/\" + compe + \"/submission.db\", echo=False)\n session = sessionmaker(bind=engine)()\n # add file contents and upload infomation into database\n add_submitdb(user_id=user_name, submit_title=submit_title, # add user_name as id\n file_content=file_content, session=session, compe=compe)\n # add scores into database\n add_scoredb(title=submit_title, user_id=user_name, session=session, compe=compe, **scores)\n\n db = load_db(compe, sc.main_score, sc.disp_score, sc.ascending)\n return render_template(\"submitresult.html\", tables=db, main_score=scores[sc.main_score], compe=compe,\n macro_src=\"./\" + compe + \"/macro.html\")\n\n\n@app.route(\"//data_download\", methods=['GET'])\ndef data_download(compe):\n return send_file(\"./competitions/\" + compe + \"/data.zip\", \n as_attachment=True, \n attachment_filename=\"data.zip\", \n mimetype=\"application/zip\") \n\n# 処理関数たち\ndef decode_file(filestream):\n file_content = filestream.read()\n file_utf_8 = file_content.decode(\"utf-8\")\n \n return file_utf_8.strip()\n\ndef convert_dataframe(file_content):\n df_submit = pd.read_csv(io.StringIO(file_content), header=0, delimiter=\"\\t\")\n\n return df_submit\n\ndef get_scores(df_submit, compe):\n # コンペ特有のスコア計算モジュールを読み込み\n ScoreCalculator = importlib.import_module(\"competitions.\" + compe + \".ScoreCalculator\")\n # テキストからスコアを計算する\n sc = ScoreCalculator.ScoreCalculator(\"./competitions/\" + compe + \"/true_answer.pkl\")\n scores = sc.calc_score(df_submit)\n return sc, scores\n\n# データベース周りの関数たち\ndef add_submitdb(user_id, submit_title, file_content, session, compe):\n models = importlib.import_module(\"competitions.\" + compe + \".models\")\n # 提出ファイルのrow_textをデータベースに保存する\n nowtime = datetime.now()\n c2 = models.SubmitStore(user_id=user_id, title=submit_title, \n upload_date=nowtime, raw_text=file_content\n )\n session.add(c2)\n session.commit()\n\n#def add_scoredb(title, user_id, session, compe, total_click, AUC, logloss, Accuracy, pred_click, diff):\ndef add_scoredb(title, user_id, session, compe, **args):\n models = importlib.import_module(\"competitions.\" + compe + \".models\")\n # スコアをデータベースに保存する\n c = models.ScoreStore(title, user_id, **args)\n session.add(c)\n session.commit()\n\ndef load_db(compe, sort_column, display_column, sort_ascending):\n engine = create_engine('sqlite:///competitions/' + compe + '/submission.db', echo=False)\n\n session = sessionmaker(bind=engine)()\n\n tbl_score = pd.read_sql_query(\"SELECT * FROM score ORDER BY \" + sort_column, engine)\n tbl_submit = pd.read_sql_query(\"SELECT * FROM submit\", engine)\n \n tbl_merged = pd.merge(tbl_score, tbl_submit[[\"id\", \"upload_date\"]], on=\"id\", how=\"inner\")\n \n # convert datetime into strings such as \"XX month ago\", or \"XX minitues ago\".\n def convert_time(t):\n time = datetime.strptime(t, \"%Y-%m-%d %H:%M:%S.%f\")\n diff = datetime.now() - time\n\n passed_list = [diff.days//30, diff.days, diff.seconds // 3600, diff.seconds // 60]\n\n accessory = [\"mo\", \"d\", \"hr\", \"min\"]\n\n passed = \"now\"\n for p, a in zip(passed_list, accessory):\n if p == 0: pass\n else:\n passed = \"{}{}{}\".format(p, a, \"s\" if a == \"hr\" and p > 1 else \"\")\n break\n\n return passed\n\n tbl_merged[\"upload_date\"] = tbl_merged[\"upload_date\"].map(convert_time)\n \n # generate entry count\n s = tbl_merged.groupby(\"user_id\").agg({\"id\":\"count\"}).reset_index()\n tbl_merged = pd.merge(tbl_merged, 
s.rename({\"id\": \"entry\"}, axis=1), on=\"user_id\", how=\"left\")\n\n # leave top score each user\n #top_scores_index = np.ravel(tbl_merged.groupby(\"user_id\").agg({\"total_click\": np.argmax}))\n #tbl_merged = tbl_merged.iloc[top_scores_index]\n\n # align columns order\n tbl_merged = tbl_merged[[\"title\", \"user_id\", sort_column] + display_column + [\"entry\", \"upload_date\"]]\n\n return tbl_merged.sort_values(sort_column, ascending=sort_ascending)\n\n# main routine\nif __name__ == '__main__':\n port = 8080\n app.run(host=\"0.0.0.0\", port=port)\n \n","repo_name":"Shoface4039/lb_open","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
+{"seq_id":"25358736298","text":"#how to find the most common word in a text:\r\n\r\nopenfile = input(\"Enter file name: \")\r\nfile = open(openfile)\r\ncounts = dict()\r\n\r\nfor line in file:\r\n words = line.split()\r\n for word in words:\r\n counts[word] = counts.get(word,0) + 1\r\n\r\nbigcount = None\r\nbigword = None\r\n\r\nfor word, count in counts.items():\r\n if bigcount is None or count > bigcount:\r\n bigword = word\r\n bigcount = count\r\n \r\n \r\nprint(bigword,bigcount)\r\n\r\n\r\n\r\n\r\n","repo_name":"SebastianoFazzino/Python-for-Everybody-Specialization-by-University-of-Michigan","sub_path":"Word_coutnter_using_dictionaries.py","file_name":"Word_coutnter_using_dictionaries.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19784074335","text":"srcdir = \"src/\"\n\ndef build(bld):\n\n if bld.env.dolua:\n luaextrac = \" src/pmc_rc.c\"\n luaextrah = \" include/pmc_rc.h\"\n luaextral = \" readConf\"\n else:\n luaextrac = \"\"\n luaextrah = \"\"\n luaextral = \"\"\n luaextradef = []\n if bld.env.hdf5:\n hdf5extral = \" hdf5\"\n else:\n hdf5extral = \"\"\n \n lapackextral = \"\"\n if bld.env.lapack:\n lapackextral = \" lapack \"\n fftw3extral = \"\"\n if bld.env.fftw3:\n fftw3extral = \" fftw3 \"\n\n bld.new_task_gen(\n features = 'cc cshlib',\n source = 'src/pmc.c src/mcmc.c src/distribution.c src/parabox.c src/tools.c src/optimize.c'+luaextrac,\n target = 'pmc',\n includes = 'include/ ../pmctools/include ',\n defines = [\"_PMC_REL_\"],\n uselib_local = 'errorio mvdens '+luaextral,\n uselib = 'gsl gslcblas dl '+hdf5extral+fftw3extral+lapackextral)\n \n bld.install_files('${PREFIX}/include/pmclib', \n 'include/allmc.h include/pmc.h include/distribution.h include/mcmc.h include/parabox.h include/tools.h'+luaextrah)\n \n if bld.env.dompi:\n \n bld.new_task_gen(\n features = 'cc cshlib',\n source = 'src/pmc_mpi.c',\n target = 'pmc_mpi',\n includes = 'include/ ../pmctools/include',\n uselib_local = 'errorio mvdens pmc'+luaextral,\n uselib = 'gsl gslcblas dl'+fftw3extral+lapackextral, \n env = bld.env_of_name('mpi'))\n bld.install_files('${PREFIX}/include/pmclib', \n 'include/pmc_mpi.h')\n bld.new_task_gen(\n features = 'cc cshlib',\n source = 'src/gridMe.c',\n target = 'gridMe',\n includes = 'include/ ../pmctools/include',\n uselib_local = 'errorio mvdens pmc'+luaextral,\n uselib = 'gsl gslcblas dl'+hdf5extral+fftw3extral+lapackextral, \n env = bld.env_of_name('mpi'))\n bld.install_files('${PREFIX}/include/pmclib', \n 'include/gridMe.h')","repo_name":"CosmoStat/pmclib","sub_path":"pmclib/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"40810505065","text":"from photo_emailer.infrastructure.credentials_io import CredentialsIO\n\nfrom photo_emailer.logic.credentials import Credentials\nfrom photo_emailer.infrastructure.email_sender import EmailSender\nfrom photo_emailer.infrastructure.credentials_refresher import CredentialsRefresher\nfrom photo_emailer.infrastructure.browser_authentication import BrowserAuthClient\nfrom email.message import EmailMessage\nfrom google.auth.exceptions import RefreshError\nfrom photo_emailer.infrastructure.image_loader import ImageLoader\nfrom photo_emailer.infrastructure.globber import Globber\nfrom photo_emailer.logic.chunker import chunk_files\n\n\nclass PhotoEmailer:\n def __init__(\n self,\n credentials_loader=None,\n credentials_refresher=None,\n browser_auth_client=None,\n sender=None,\n image_loader=None,\n globber=None,\n image_directory=\"./\",\n max_email_size=25 * 1024 * 1024,\n ):\n self.credentials_loader = (\n credentials_loader\n if credentials_loader is not None\n else CredentialsIO.create(\"token.json\")\n )\n\n self.credentials_refresher = (\n credentials_refresher\n if credentials_refresher is not None\n else CredentialsRefresher.create()\n )\n\n self.browser_auth_client = (\n browser_auth_client\n if browser_auth_client is not None\n else BrowserAuthClient.create()\n )\n\n self.sender = sender if sender is not None else EmailSender.create()\n\n self.image_loader = (\n image_loader if image_loader is not None else ImageLoader.create()\n )\n\n self.globber = globber if globber is not None else Globber.create()\n\n self.image_directory = image_directory\n\n self.max_email_size = max_email_size\n\n self.credentials = None\n\n def load_credentials(self):\n self.credentials = Credentials.from_dict(\n self.credentials_loader.load_credentials()\n )\n\n def refresh_if_needed(self):\n try:\n if self.credentials.is_expired():\n self.credentials = self.credentials_refresher.refresh(self.credentials)\n except RefreshError:\n self.credentials = self.browser_auth_client.run_browser_authentication()\n\n def store_credentials(self):\n self.credentials_loader.store_credentials(self.credentials.to_dict())\n\n def send_email(self, to):\n msg = self.prepare_email(to)\n self.sender.send_email(msg, self.credentials)\n\n def send_emails(self, to):\n for msg in self.prepare_emails(to):\n self.sender.send_email(msg, self.credentials)\n\n def prepare_emails(self, to):\n image_files = self.globber.glob(self.image_directory)\n image_contents = [\n self.image_loader.load_image(image_file) for image_file in image_files\n ]\n chunks = chunk_files(image_contents, self.max_email_size)\n\n msgs = []\n for chunk in chunks:\n msg = EmailMessage()\n msg[\"Subject\"] = \"\"\n msg[\"To\"] = to\n for image in chunk:\n msg.add_attachment(image, maintype=\"image\", subtype=\"jpg\")\n msgs.append(msg)\n return msgs\n\n def prepare_email(self, to):\n msg = EmailMessage()\n msg[\"Subject\"] = \"\"\n msg[\"To\"] = to\n msg.set_content(\"Hello World\")\n\n for image_file in self.globber.glob(self.image_directory):\n image = self.image_loader.load_image(image_file)\n msg.add_attachment(image, maintype=\"image\", subtype=\"jpg\")\n\n return msg\n","repo_name":"cadolphs/google_photo_emailer","sub_path":"photo_emailer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6758690949","text":"# #map\r\n# #map(function,iter(list,tuple,set,dictionary))\r\n# def multiply(num):\r\n#     return num*num\r\n# result=map(multiply,(2,4,6,8))\r\n# print(tuple(result))\r\n\r\n\r\n# #map(function,iter(list,tuple,set,dictionary))\r\n# def multiply(num):\r\n#     return num*num\r\n# #result=map(multiply,(2,4,6,8))\r\n# result=map(lambda i:i*i, (2,4,6,8))\r\n# print((tuple((result))))\r\n\r\n\r\n##########\r\n\r\n\r\ndef toUpper(str): \r\n    return str.upper()\r\nres=map(toUpper,(\"software\",\"sem\",\"3\"))\r\n# print(list(res))\r\n\r\n\r\nnewlist=list(res)\r\nnewlist.append(\"HEY\")\r\nprint(newlist)\r\n# newlist=tuple(res)\r\n# newlist.append(\"HEY tuple\")\r\n# print(newlist)\r\n\r\ndict_item={\"a\":\"Car\",\"b\":\"Bike\",\"c\":\"Train\"}\r\na=map(lambda i:(i[0]+\"__\",i[1]+\"y\"),dict_item.items())\r\nprint(dict(a))\r\n\r\ndict_item={\"a\":\"Car\",\"b\":\"Bike\",\"c\":\"Train\"}\r\na=map(lambda i:(i[0]+\"__\",i[1]+\"d\"), dict_item.items())\r\nprint(dict(a))\r\n\r\n\r\n","repo_name":"diyatalsaniya/Demo-GitHub","sub_path":"Map.py","file_name":"Map.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7985234037","text":"#!/usr/bin/env python\n# coding: utf8\n\"\"\"Example of training spaCy's named entity recognizer, starting off with an\nexisting model or a blank model.\nFor more details, see the documentation:\n* Training: https://spacy.io/usage/training\n* NER: https://spacy.io/usage/linguistic-features#named-entities\nCompatible with: spaCy v2.0.0+\n\"\"\"\nfrom __future__ import unicode_literals, print_function\n\nimport plac\nimport random\nfrom pathlib import Path\nimport spacy\nimport re\n\nfrom convert_conll2spacy import convert_conll2spacy\n\n# training data\nTRAIN_DATA = [\n ('Who is Shaka Khan?', {\n 'entities': [(7, 17, 'I-PER')]\n }),\n ('I like London and Berlin.', {\n 'entities': [(7, 13, 'I-LOC'), (18, 24, 'I-LOC')]\n })\n]\n \nTEST_DATA = ('Where is Aditya ?',)\n\n\n\n@plac.annotations(\n model=(\"Model name. Defaults to blank 'en' model.\", \"option\", \"m\", str),\n output_dir=(\"Optional output directory\", \"option\", \"o\", Path),\n n_iter=(\"Number of training iterations\", \"option\", \"n\", int))\ndef main(model=None, output_dir=None, n_iter=5):\n \"\"\"Load the model, set up the pipeline and train the entity recognizer.\"\"\"\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n # otherwise, get it so we can add labels\n else:\n ner = nlp.get_pipe('ner')\n\n # add labels\n traindata = 'data/conll03/eng.train'\n testdata = 'data/conll03/eng.testa' \n Cv = convert_conll2spacy(traindata)\n train_data = Cv.convert()[0]\n Cv = convert_conll2spacy(testdata)\n test_data = Cv.convert()[0]\n for _, annotations in train_data:\n for ent in annotations.get('entities'):\n ner.add_label(ent[2])\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(train_data)\n losses = {}\n for text, annotations in train_data:\n nlp.update(\n [text], # batch of texts\n [annotations], # batch of annotations\n drop=0.5, # dropout - make it harder to memorise data\n sgd=optimizer, # callable to update weights\n losses=losses)\n print(losses)\n\n# # test the trained model\n tagged_output = 'spacy_trained_with_conll_tested_on_conlltesta'\n# #testfilespacygermeval = open(tagged_output, \"w\")\n# for word in doc:\n# #print(word.text, word.orth, word.lower, word.tag_, word.ent_type_, word.ent_iob)\n# line = word.text + \"\\t\" + word.ent_type_ + \"\\n\"\n# testfilespacygermeval.write(line)\n# i += 1\n# print(i) \n \n# with open(testdata, \"r\") as test_file:\n# lines = test_file.readlines()\n# with open(tagged_output, \"w\") as testfilespacygermeval:\n# for line in lines:\n# text = re.split(\" \", line)[0]\n# doc = nlp(text)\n# print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n# print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n# for word in doc:\n# line = word.text + \"\\t\" + word.ent_type_ + \"\\n\"\n# testfilespacygermeval.write(line)\n# postprocess(tagged_output)\n\n\n with open(tagged_output, \"w\") as testfilespacygermeval:\n for text, _ 
in test_data:\n #text = re.split(\" \", line)[0]\n doc = nlp(text)\n print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n for word in doc:\n line = word.text + \"\\t\" + word.ent_type_ + \"\\n\"\n testfilespacygermeval.write(line)\n postprocess(tagged_output)\n\n\n # save model to output directory\n output_dir = 'classifiers/spacy/spacy_conll_trained'\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n# print(\"Loading from\", output_dir)\n# nlp2 = spacy.load(output_dir)\n# for text, _ in train_data:\n# doc = nlp2(text)\n# print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n# print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n\ndef postprocess(tagged_files):\n i = 1\n output = tagged_files + \".tsv\"\n with open(tagged_files, \"r\") as pre, open(output, \"w\") as post:\n for line in pre:\n line = line.split(\"\\t\")\n # Fixing ignored tokens in germaner conll formated files by stanford ner on lines 64899 and 99279\n #if i == 64899 or i == 99279:\n # post.write(\"<>\" + \"\\t\" + \"O\" + \"\\n\")\n if len(line) >= 1:\n if line[0] == \"####\":\n post.write(\"\")\n elif line[0] == \" \":\n post.write(\"\\n\")\n else:\n post.write(line[0] + \"\\t\" + line[1])\n else:\n print(line, i)\n i += 1\n\nif __name__ == '__main__':\n plac.call(main)\n\n # Expected output:\n # Entities [('Shaka Khan', 'PERSON')]\n # Tokens [('Who', '', 2), ('is', '', 2), ('Shaka', 'PERSON', 3),\n # ('Khan', 'PERSON', 1), ('?', '', 2)]\n # Entities [('London', 'LOC'), ('Berlin', 'LOC')]\n # Tokens [('I', '', 2), ('like', '', 2), ('London', 'LOC', 3),\n # ('and', '', 2), ('Berlin', 'LOC', 3), ('.', '', 2)]","repo_name":"adityakantsharma/NER","sub_path":"spacy_2.py","file_name":"spacy_2.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25490632267","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar  8 10:52:14 2018\r\n\r\n@author: 89288\r\n\"\"\"\r\n\r\nimport sys\r\nimport UrlManager\r\nimport HtmlParser\r\nimport DataArranger\r\nfrom selenium import webdriver\r\nimport time\r\n\r\nsys.path.append(r'C:\\Program Files (x86)\\Google\\Chrome\\Application')\r\n\r\nclass CrawlerManager():\r\n    \r\n    def __init__(self):\r\n        '''\r\n        Initialize the helper classes\r\n        '''\r\n        self.UM = UrlManager.UrlManager()\r\n        self.HP = HtmlParser.HtmlParser()\r\n        self.DA = DataArranger.DataArrange()\r\n        self.driver = webdriver.Chrome()\r\n        \r\n    def judge_address(self):\r\n        '''\r\n        Select the work location\r\n        '''\r\n        ele_address = self.driver.find_element_by_xpath('//a[@class=\"tab focus\"]')\r\n        if ele_address.text == u'深圳站':\r\n            ele_address.click()\r\n        else:\r\n            ele_address = self.driver.find_element_by_xpath('//div[@id=\"changeCityBox\"]/ul/li[6]/a')\r\n            ele_address.click()\r\n        \r\n    def go_search_page(self):\r\n        '''\r\n        Go to the search page\r\n        '''\r\n        #open the website\r\n        self.driver.get('https://www.lagou.com/')\r\n        #maximize the browser window\r\n        self.driver.maximize_window()\r\n        self.driver.implicitly_wait(10)\r\n        #select the work location\r\n        self.judge_address()\r\n        self.driver.implicitly_wait(5)\r\n        #enter the search keywords\r\n        ele_searchinput = self.driver.find_element_by_id('search_input')\r\n        ele_searchinput.send_keys(u'python爬虫')\r\n        ele_searchbutton = self.driver.find_element_by_id('search_button')\r\n        ele_searchbutton.click()\r\n        self.driver.implicitly_wait(10)\r\n\r\n        \r\n    \r\n    def html_crawl(self):\r\n        '''\r\n        Crawler scheduler\r\n        '''\r\n        self.go_search_page()\r\n        i = 1\r\n        while i<6:\r\n            self.driver.implicitly_wait(5)\r\n            #scroll the page to the bottom\r\n            js = 'window.scrollTo(0,document.body.scrollHeight);'\r\n            self.driver.execute_script(js)\r\n            #parse the page content\r\n            html_text = self.driver.page_source \r\n            contents_tuple = self.HP.parser(html_text)\r\n            #store the data\r\n            for content_tuple in contents_tuple:\r\n                self.DA.save_job_content(content_tuple[0])\r\n                self.DA.save_got_url({'url':content_tuple[1]})\r\n            \r\n            button = self.driver.find_element_by_class_name('pager_next')\r\n            button.click()\r\n            \r\n            i+=1\r\n            time.sleep(6)\r\n    \r\nif __name__ == '__main__':\r\n    \r\n    start_crawler = CrawlerManager()\r\n    start_crawler.html_crawl()\r\n    \r\n    \r\n\r\n","repo_name":"AtrovirensBamboo/CrawlerOfLagouWebsite","sub_path":"CrawlerManager.py","file_name":"CrawlerManager.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13728913927","text":"from django.conf import settings\n\nfrom common import api\nfrom common import normalize\nfrom common import profile\nfrom common import util\nfrom common.tests import ViewTestCase\n\nclass ExploreTest(ViewTestCase):\n\n def test_explore_when_signed_out(self):\n \n l = profile.label('explore_get_public')\n r = self.client.get('/explore')\n l.stop()\n \n self.assertContains(r, \"Latest Public Posts\")\n self.assertTemplateUsed(r, 'explore/templates/recent.html')\n\n def test_explore_when_signed_in(self):\n self.login('popular')\n \n l = profile.label('explore_get_logged_in')\n r = self.client.get('/explore')\n l.stop()\n\n self.assertContains(r, \"Latest Public Posts\")\n self.assertTemplateUsed(r, 'explore/templates/recent.html')\n","repo_name":"jimpick/jaikuengine","sub_path":"explore/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"22663103330","text":"#!/usr/bin/python3\n\n\"\"\"\n Create a class that inherits and is\n a caching system:\n\"\"\"\n\nBaseCaching = __import__('base_caching').BaseCaching\n\n\nclass FIFOCache(BaseCaching):\n \"\"\" FIFOCache class inherits from BaseCaching. \"\"\"\n def __init__(self):\n super().__init__()\n\n def put(self, key, item):\n \"\"\"\n Must assign to the dictionary\n self.cache_data the item value for the key.\n \"\"\"\n if key is None or item is None:\n return\n self.cache_data[key] = item\n\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n dump = list(self.cache_data.keys())\n del self.cache_data[dump[0]]\n print(\"DISCARD: {}\".format(dump[0]))\n\n def get(self, key):\n \"\"\"\n Return the value linked to the key\n \"\"\"\n if key is None or key not in self.cache_data:\n return None\n return self.cache_data[key]\n","repo_name":"sebasgarzons/holbertonschool-backend","sub_path":"0x01-caching/1-fifo_cache.py","file_name":"1-fifo_cache.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"29706370725","text":"import argparse\nfrom collections import defaultdict\n\nimport simplejson\n\n\ndef list_to_facet_fixture(facet_type, items):\n out = []\n\n items = items.split('\\n')\n\n for v in items:\n d = defaultdict()\n d['model'] = 'core.facet'\n d['fields'] = defaultdict()\n d['fields']['facet_type'] = facet_type\n d['fields']['title'] = v.lower()\n out.append(d)\n\n return simplejson.dumps(out)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'facet_type', help='the facet type')\n parser.add_argument(\n 'items', help='a string with a list of items separated by a new line')\n args = parser.parse_args()\n\n print(list_to_facet_fixture(args.facet_type, args.items))\n","repo_name":"kingsdigitallab/egomedia-django","sub_path":"core/fixtures/convert_list_to_fixture.py","file_name":"convert_list_to_fixture.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"29779619417","text":"from typing import Callable\n\nimport torch\nimport torch.nn as nn\n\nfrom torchattack.base import Attack\n\n\nclass FGSM(Attack):\n \"\"\"Fast Gradient Sign Method (FGSM)\n\n From the paper 'Explaining and Harnessing Adversarial Examples'\n https://arxiv.org/abs/1412.6572\n \"\"\"\n\n def __init__(\n self,\n model: nn.Module,\n transform: Callable[[torch.Tensor], torch.Tensor] | None,\n eps: float = 8 / 255,\n clip_min: float = 0.0,\n clip_max: float = 1.0,\n targeted: bool = False,\n device: torch.device | None = None,\n ) -> None:\n \"\"\"Initialize the FGSM attack.\n\n Args:\n model: A torch.nn.Module network model.\n transform: A transform to normalize images.\n eps: Maximum perturbation measured by Linf. Defaults to 8/255.\n clip_min: Minimum value for clipping. Defaults to 0.0.\n clip_max: Maximum value for clipping. Defaults to 1.0.\n targeted: Targeted attack if True. Defaults to False.\n device: Device to use for tensors. Defaults to cuda if available.\n \"\"\"\n\n super().__init__(transform, device)\n\n self.model = model\n self.eps = eps\n self.clip_min = clip_min\n self.clip_max = clip_max\n self.targeted = targeted\n self.lossfn = nn.CrossEntropyLoss()\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"Perform FGSM on a batch of images.\n\n Args:\n x: A batch of images. Shape: (N, C, H, W).\n y: A batch of labels. Shape: (N).\n\n Returns:\n The perturbed images if successful. Shape: (N, C, H, W).\n \"\"\"\n\n # This is written in a way that is similar to iterative methods such as MIM.\n # The original implementation of FGSM is not written in this way.\n delta = torch.zeros_like(x, requires_grad=True)\n\n outs = self.model(self.transform(x + delta))\n loss = self.lossfn(outs, y)\n\n if self.targeted:\n loss = -loss\n\n loss.backward()\n\n # If for some reason delta.grad is None, return the original image.\n if delta.grad is None:\n return x\n\n g_sign = delta.grad.data.sign()\n\n delta.data = delta.data + self.eps * g_sign\n delta.data = torch.clamp(delta.data, -self.eps, self.eps)\n delta.data = torch.clamp(x + delta.data, self.clip_min, self.clip_max) - x\n\n return x + delta\n\n\nif __name__ == \"__main__\":\n from torchattack.utils import run_attack\n\n run_attack(FGSM, {\"eps\": 8 / 255, \"clip_min\": 0.0, \"clip_max\": 1.0})\n","repo_name":"daisylab-bit/torchattack","sub_path":"src/torchattack/fgsm.py","file_name":"fgsm.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"9331323557","text":"import asyncio\nfrom url_code.settings import *\nfrom url_code.url_task import URLTask\n\n\ndef url_list_from_file(path=URLS_FILE):\n with open(path, 'r') as f:\n for url in f:\n yield url.strip()\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n tasks = [URLTask(url).url_code() for url in url_list_from_file()]\n asyncio.ensure_future(asyncio.wait(tasks))\n try:\n loop.run_forever()\n finally:\n loop.close()\n","repo_name":"vchilikov/URLcode","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"24825069777","text":"\nprint(\"If you finished print 0\")\nnums = []\n\nwhile True:\n n = int(input(\"Plz enter your numbers:\"))\n if n == 0:\n break\n else:\n nums.append(n)\nprint(nums)\nnums.reverse()\nprint(nums)","repo_name":"Ancksunamun/New-python-course-autumn","sub_path":"04.Assignment(Aydineradat)/04.Reverse/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10077034532","text":"# word = 'banana'\n# count = 0\n# for letter in word:\n# if letter == 'a':\n# count = count + 1\n# print(count)\n\n\ndef lettercount(word,letterChoice) :\n count = 0\n for letter in word: # so this variable will override the function if we use the same word 'letter'\n if letter == letterChoice :\n count = count + 1\n print(count)\n\nlettercount(\"banana\",\"b\")\n\n# str.count(sub[, start[, end]])\n# Return the number of non-overlapping occurrences of substring sub in the range [start, end]. Optional arguments start and end are interpreted as in slice notation.\nword = \"banana\"\nprint(word.count(\"a\"))\n","repo_name":"migladon/Coding-Local","sub_path":"Lesson_6_Count_Letters_In_String_Function.py","file_name":"Lesson_6_Count_Letters_In_String_Function.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37023754516","text":"class Solution:\n    def transpose(self, A):\n        \"\"\"\n        :type A: List[List[int]]\n        :rtype: List[List[int]]\n        \"\"\"\n        R, C = len(A), len(A[0])\n        ans = [[None] * R for _ in range(C)]\n        for i, v1 in enumerate(A):\n            for j, v2 in enumerate(v1):\n                # ans is C x R, so element A[i][j] goes to ans[j][i]\n                ans[j][i] = A[i][j]\n        return ans\n\n\nif __name__ == \"__main__\":\n    test = Solution()\n    print(test.transpose([[1, 2], [3, 4]]))\n","repo_name":"sdlbp/LeetCode","sub_path":"leetcode-algorithms/867. Transpose Matrix/transpose_matrix.py","file_name":"transpose_matrix.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"30388971736","text":"import pandas as pd\r\nimport sqlite3\r\nimport streamlit as st\r\n\r\n# The layout is changed to fill the whole webpage\r\nst.set_page_config(layout='wide')\r\n\r\n# Header image and title are loaded, the title has formatted color (extra)\r\nst.image(\"header.PNG\")\r\nst.markdown('Partner search app
', unsafe_allow_html=True)\r\n\r\n# FR2.5: Connect to the ecsel_database.db and extract the list of countries in a dataframe\r\nconn = sqlite3.connect('ecsel_database.db')\r\ncountries_df = pd.read_sql_query('SELECT * FROM Countries', conn)\r\ncountries_df = pd.DataFrame(countries_df)\r\nacros = list(countries_df[\"Acronym\"])\r\n\r\n# FR2.6: Ask the user to input a country acronym\r\nselected_country_acronym = st.selectbox(\"Please select a country acronym from the dropdown menu:\", acros)\r\ncountry_name = countries_df.loc[countries_df[\"Acronym\"] == selected_country_acronym, \"Country\"].values[0]\r\n\r\n\r\n\r\n# FR2.8: Generate a new dataframe of participants with the total amount of received grants per partner in the\r\n# selected country and include the year information\r\nparticipants_df = pd.read_sql_query(\r\n f\"SELECT shortName, name, activityType, organizationURL, strftime('%Y', startDate) as year, SUM(ecContribution) AS Total_Grants_Received \"\r\n f\"FROM Participants p JOIN Projects pr ON p.projectID = pr.projectID WHERE Country = '{selected_country_acronym}' GROUP BY shortName, name, activityType, \"\r\n f\"organizationURL, year\", conn)\r\n\r\n# FR2.9: Display the generated dataset, in descending order by received grants\r\nst.markdown('Participants in {}
'.format(country_name), unsafe_allow_html=True)\r\nst.write(participants_df.sort_values(\"Total_Grants_Received\", ascending=False))\r\n\r\n# FR2.10: Generate a new project dataframe with the project coordinators from the selected country\r\nproject_coordinators_df = pd.read_sql_query(\r\n f\"SELECT shortName, name, activityType, projectAcronym \"\r\n f\"FROM Participants WHERE Country = '{selected_country_acronym}' AND role = 'coordinator' \"\r\n f\"GROUP BY shortName, name, activityType, projectAcronym\", conn)\r\n\r\n# FR2.11: Display the generated coordinator dataset, in ascending order by shortName\r\nst.markdown('Project coordinators in {}
'.format(country_name), unsafe_allow_html=True)\r\nst.write(project_coordinators_df.sort_values(\"shortName\", ascending=True))\r\n\r\n# FR2.12: Save the generated datasets (participants, and project coordinators) in a CSV file\r\nparticipants_csv = participants_df.to_csv(index=False)\r\nproject_coordinators_csv = project_coordinators_df.to_csv(index=False)\r\n\r\n# FR2.12: Display two download buttons to download the CSV files\r\nst.download_button(\"Download Participants CSV\", data=participants_csv, file_name='participants_country.csv', mime='text/csv')\r\nst.download_button(\"Download Project Coordinators CSV\", data=project_coordinators_csv, file_name='project_coordinators.csv', mime='text/csv')\r\n\r\n\r\n# Extra: Display a bar chart with evolution of received grants of the partners in a country according to their activityType\r\nst.markdown('Evolution of received grants according to activityType and year
', unsafe_allow_html=True)\r\nactivity_type_grants_by_year = participants_df.groupby([\"year\", \"activityType\"])[\"Total_Grants_Received\"].sum().unstack().fillna(0)\r\nst.bar_chart(activity_type_grants_by_year)\r\n\r\n\r\n# Extra: Displaying list/stats of projects according to the project keywords\r\nkeywords = st.multiselect(\"Select project keywords to filter:\", options=['AI', 'IoT', 'Big Data', 'Cloud', 'Security'])\r\nfiltered_projects_df = pd.read_sql_query(\"SELECT * FROM Projects\", conn)\r\n\r\nif keywords:\r\n filtered_projects_df = filtered_projects_df[filtered_projects_df[\"objective\"].apply(lambda x: any(kw in x for kw in keywords))]\r\n\r\nst.markdown('Filtered Projects List
', unsafe_allow_html=True)\r\nst.write(filtered_projects_df)\r\n\r\n# Closing the connection\r\nconn.close()\r\n","repo_name":"100431239/trabajo-prueba","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"29377725352","text":"import random\nfrom typing import Optional\n\nimport jax\nimport tqdm\nimport tree_utils\nfrom optax import LookaheadParams\nfrom x_xy.algorithms import Generator\n\nfrom neural_networks.logging import Logger, n_params\n\n_KILL_RUN = False\n\n\ndef send_kill_run_signal():\n global _KILL_RUN\n _KILL_RUN = True\n\n\nclass TrainingLoopCallback:\n def after_training_step(\n self,\n i_episode: int,\n metrices: dict,\n params: LookaheadParams,\n grads: list[dict],\n sample_eval: dict,\n loggers: list[Logger],\n ) -> None:\n pass\n\n def close(self):\n pass\n\n\nclass TrainingLoop:\n def __init__(\n self,\n key,\n generator: Generator,\n params,\n opt_state,\n step_fn,\n loggers: list[Logger],\n callbacks: list[TrainingLoopCallback] = [],\n cycle_seed: Optional[int] = None,\n ):\n self._key = key\n self.i_episode = -1\n self._generator = generator\n self._params = params\n self._opt_state = opt_state\n self._step_fn = step_fn\n self._loggers = loggers\n self._callbacks = callbacks\n self._seeds = list(range(cycle_seed)) if cycle_seed else None\n if cycle_seed is not None:\n random.seed(1)\n\n self._sample_eval = generator(jax.random.PRNGKey(0))\n batchsize = tree_utils.tree_shape(self._sample_eval, 0)\n T = tree_utils.tree_shape(self._sample_eval, 1)\n\n for logger in loggers:\n if isinstance(params, LookaheadParams):\n fast_params = params.fast\n else:\n fast_params = params\n logger.log(dict(n_params=n_params(fast_params), batchsize=batchsize, T=T))\n\n @property\n def key(self):\n if self._seeds is not None:\n seed_idx = self.i_episode % len(self._seeds)\n if seed_idx == 0:\n random.shuffle(self._seeds)\n return jax.random.PRNGKey(self._seeds[seed_idx])\n else:\n self._key, consume = jax.random.split(self._key)\n return consume\n\n def run(self, n_episodes: int = 1, close_afterwards: bool = True):\n for _ in tqdm.tqdm(range(n_episodes)):\n self.step()\n\n if _KILL_RUN:\n break\n\n if close_afterwards:\n self.close()\n\n def step(self):\n self.i_episode += 1\n\n sample_train = self._sample_eval\n self._sample_eval = self._generator(self.key)\n\n self._params, self._opt_state, loss, debug_grads = self._step_fn(\n self._params, self._opt_state, sample_train[0], sample_train[1]\n )\n\n metrices = {}\n metrices.update(loss)\n\n for callback in self._callbacks:\n callback.after_training_step(\n self.i_episode,\n metrices,\n self._params,\n debug_grads,\n self._sample_eval,\n self._loggers,\n )\n\n for logger in self._loggers:\n logger.log(metrices)\n\n return metrices\n\n def close(self):\n for callback in self._callbacks:\n callback.close()\n\n for logger in self._loggers:\n logger.close()\n","repo_name":"SimiPixel/neural_networks","sub_path":"neural_networks/rnno/training_loop.py","file_name":"training_loop.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"9318825708","text":"student_tickets = 0\r\nstandard_tickets = 0\r\nkid_tickets = 0\r\n\r\nwhile True:\r\n film_name = input()\r\n\r\n if film_name == \"Finish\":\r\n break\r\n\r\n capacity = int(input())\r\n sold_tickets = 0\r\n\r\n while sold_tickets < capacity:\r\n ticket_type = input()\r\n\r\n if ticket_type == \"End\":\r\n break\r\n if ticket_type == \"student\":\r\n student_tickets += 1\r\n elif ticket_type == \"standard\":\r\n standard_tickets += 1\r\n else:\r\n kid_tickets += 1\r\n\r\n sold_tickets += 1\r\n\r\n print(f\"{film_name} - {sold_tickets /capacity * 100:.2f}% full.\")\r\n\r\ntotal_tickets = standard_tickets + student_tickets + kid_tickets\r\n\r\nprint(f\"Total tickets: {total_tickets}\")\r\nprint(f\"{student_tickets / total_tickets * 100:.2f}% student tickets.\")\r\nprint(f\"{standard_tickets / total_tickets * 100:.2f}% standard tickets.\")\r\nprint(f\"{kid_tickets / total_tickets * 100:.2f}% kids tickets.\")","repo_name":"Petar-Koprinkov/Python_Basic_SoftUni","sub_path":"nested_loops_exercise/cinema_tickets.py","file_name":"cinema_tickets.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27490335339","text":"'''\nGiven a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.\nYou may assume no duplicates in the array.\n\nExample 1:\nInput: [1,3,5,6], 5\nOutput: 2\n\nExample 2:\nInput: [1,3,5,6], 2\nOutput: 1\n\nExample 3:\nInput: [1,3,5,6], 7\nOutput: 4\n\nExample 4:\nInput: [1,3,5,6], 0\nOutput: 0\n\n'''\n\n\n\n\nclass Solution:\n    def searchInsert(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: int\n        \"\"\"\n        # Approach #1\n        # if target in nums:\n        #     return nums.index(target)\n        # else:\n        #     for index,num in enumerate(nums):\n        #         if target < nums[0]:\n        #             return 0\n        #         if target > nums[-1]:\n        #             return len(nums)\n        #         if num < target and nums[index+1] > target:\n        #             return index+1\n\n\n\n        # Approach #2\n        # left , right = 0 , len(nums) - 1\n        # if target < nums[left]:\n        #     return 0\n        # if target > nums[right]:\n        #     return len(nums)\n        # if target in nums:\n        #     return nums.index(target)\n        # while right - 1 > left :\n        #     mid = ( left + right ) // 2\n        #     if target < nums[mid]:\n        #         right = mid\n        #     elif nums[mid] < target:\n        #         left = mid\n        #     if nums[mid] == target:\n        #         return mid\n        # return left + 1\n\n\n        # Approach #3: a simplified version of Approach #2\n        i, j = 0, len(nums) - 1\n        while i < j:\n            mid = (j - i) // 2 + i\n            if nums[mid] < target:\n                i = mid + 1\n            else:\n                j = mid - 1\n        return i+1 if nums[i] < target else i   # check nums[i] rather than nums[0] at the end, because we must handle inputs where the binary search does not find the target.\n\n\n\n","repo_name":"OnlyChristmas/leetcode","sub_path":"Python/search-insert-position.py","file_name":"search-insert-position.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"}
+{"seq_id":"29624484494","text":"\"\"\"\nMore runners for continuous RL algorithms can be added here.\n\"\"\"\nimport DDPG_runner\n\n# Modify these constants if needed.\nEPISODE_LIMIT = 10000\nSTEPS_PER_EPISODE = 200 # How many steps to run each episode (changing this messes up the solved condition)\n\nif __name__ == '__main__':\n DDPG_runner.run()\n","repo_name":"mattwfranchi/Webots.HPC","sub_path":"Simulations/cartpole_continuous/controllers/supervisor_manager/supervisor_manager.py","file_name":"supervisor_manager.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
+{"seq_id":"18224148714","text":"# author: @iamtienng\n# import models\nfrom models.people import People\nfrom models.rating import Rating\nfrom models.movie import Movie\n\n\nclass User(People):\n def __init__(self):\n self.type = \"user\"\n\n def create_rating(self, movie: Movie, ratingValue):\n newRating = Rating(self.id, movie.movieId)\n if (newRating.create(ratingValue)):\n return newRating.get_info()\n return False\n\n def update_rating(self, movie: Movie, newRatingValue):\n rating = Rating(self.id, movie.movieId)\n if (rating.update(newRatingValue)):\n return rating.get_info()\n return False\n\n def delete_rating(self, movie: Movie):\n rating = Rating(self.id, movie.movieId)\n if (rating.delete()):\n return True\n return False\n","repo_name":"iamtienng/movie-recommendation-system","sub_path":"mrsbb/BackEnd/mrsbb-be/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28584923233","text":"import z3\n\nfrom pdb import set_trace\n\nimport string\nimport toolz\nimport enum\nimport logging\nimport itertools\nfrom typing import Iterable, Tuple, Set\n\nfrom . import parsing\nfrom .parsing import (PToken, EMPTY, CHAR, DOT, STAR, BAR, CONCAT, GROUP, BACKREF, CARET, DOLLAR)\nfrom .preprocessing import (convert_stars, convert_bars, flatten_regex, remove_path, convert_to_encoded_symbols)\n\n\nlogger = logging.getLogger(__name__)\n\n# set of nonzero lengths with which to approximate star\n# the zero length is included automatically\nDEFAULT_STAR_LENGTHS = [4]\n\n# space is not valid URL char.\n# pound (#) is invalid in domain. It gets replaced with /# in Chrome in the URL bar.\n# question (?) is also invalid in domain, and gets replaced with /? in Chrome URL bar.\nDEFAULT_DOT_CHARSET = 'abcdefghijklmnop012345' + \"/\"\n\n\nclass RegexStringExpr:\n scratch_var_cnt = 0\n ignore_wildcards = z3.Bool('ignore_wildcards')\n\n def _gen_string_var(self):\n x = z3.String('_x_{}'.format(self.string_var_count))\n self.string_var_count += 1\n return x\n\n def _gen_bool_var(self):\n b = z3.Bool('_b_{}'.format(self.bool_var_count))\n self.bool_var_count += 1\n return b\n\n def __init__(self, regex: str, unknown: z3.StringSort(),\n word_choice_cutoff=10,\n dot_charset=DEFAULT_DOT_CHARSET,\n star_lengths: Iterable[int] = DEFAULT_STAR_LENGTHS,\n symbolic=False):\n \"\"\"\n Compiles Regex to Z3 String expressions\n\n :param dot_charset: Characters that the DOT metachar can match. This should be limited to\n valid URL characters, or can be set to a taint marker.\n\n \"\"\"\n\n self.unknown = unknown\n self.star_lengths = star_lengths\n\n self.string_var_count = 0\n self.bool_var_count = 0\n\n self.symbolic = symbolic\n\n _parser = parsing.RegexParser()\n parse_result = _parser.parse(regex)\n self.parsing_errors = parse_result['errors']\n regex_0 = flatten_regex(parse_result['root'])\n regex_1 = remove_path(regex_0)\n regex_2 = convert_stars(regex_1, star_lengths)\n regex_3 = convert_bars(regex_2, cutoff=word_choice_cutoff)\n if symbolic:\n regex_4, self.symbols = convert_to_encoded_symbols(regex_3, {})\n else:\n regex_4, self.symbols = regex_3, {}\n self.regex = regex_4\n assert self.regex\n self.groups = parse_result['groups']\n self.backrefs = parse_result['backrefs']\n self.dot_charset = dot_charset\n\n def _sat_expr(self, regex: Tuple) -> Tuple[z3.SeqRef, z3.BoolRef, z3.BoolRef, z3.BoolRef]:\n \"\"\"\n\n :returns: string that matches regex, constraint on string,\n whether string contains caret, whether string contains dollar\n\n Whether there is a caret or dollar needs to be tracked because they imply constraints on\n neighboring strings to the one returned.\n\n \"\"\"\n\n ty = regex[0]\n\n if ty == EMPTY:\n return (z3.StringVal(''), z3.BoolVal(True), z3.BoolVal(False), z3.BoolVal(False))\n\n elif ty == CHAR:\n return (z3.StringVal(regex[1]), z3.BoolVal(True), z3.BoolVal(False), z3.BoolVal(False))\n\n elif ty == DOT:\n x = self._gen_string_var()\n constraint = z3.And(z3.Implies(self.ignore_wildcards, x == z3.StringVal('')),\n z3.Implies(z3.Not(self.ignore_wildcards),\n z3.Or(*(x == z3.StringVal(y) for y in self.dot_charset))))\n return (x, constraint, z3.BoolVal(False), z3.BoolVal(False))\n\n elif ty == STAR:\n # STAR should have been approximated with something else during preprocessing.\n raise NotImplementedError\n\n elif ty == BAR:\n ys, constraints_list, carets_list, dollars_list = zip(*map(self._sat_expr, regex[1:]))\n\n x = self._gen_string_var()\n x_constraint 
= z3.Or(*(z3.And(x == y, y_constraint)\n for y, y_constraint in zip(ys, constraints_list)))\n\n return (x, x_constraint, z3.Or(*carets_list), z3.Or(*dollars_list))\n elif ty == CONCAT:\n\n ys, y_constraints, carets_list, dollars_list = zip(*map(self._sat_expr, regex[1:]))\n\n x = z3.Concat(*ys)\n\n start_constraints = (\n z3.Implies(b, z3.Length(y) == 0)\n for ii, b in enumerate(carets_list)\n for y in ys[:ii])\n\n end_constraints = (\n z3.Implies(b, z3.Length(y) == 0)\n for ii, b in enumerate(dollars_list)\n for y in ys[ii+1:]\n )\n\n x_constraint = z3.And(*toolz.concatv(y_constraints, start_constraints, end_constraints))\n\n return (x, x_constraint, z3.Or(*carets_list), z3.Or(*dollars_list))\n\n elif ty == GROUP:\n # backrefs not supported\n idx = regex[1] - 1 # not used currently; would be used to implement backrefs\n inner = regex[2]\n return self._sat_expr(inner)\n\n elif ty == BACKREF:\n raise NotImplementedError\n\n elif ty == CARET:\n assert len(regex) == 1\n b = self._gen_bool_var()\n return (z3.StringVal(''), b, b, z3.BoolVal(False))\n\n elif ty == DOLLAR:\n assert len(regex) == 1\n b = self._gen_bool_var()\n return (z3.StringVal(''), b, z3.BoolVal(False), b)\n\n else:\n raise ValueError(\"Unknown regex_parser type '%s'\" % repr(ty))\n\n def re_expr(self):\n ss, expr, carets, dollars = self._sat_expr(self.regex)\n return z3.simplify(z3.And(self.unknown == ss, expr))\n","repo_name":"OktaSecurityLabs/hack_url_re","sub_path":"hack_url_re/compiling.py","file_name":"compiling.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"}
+{"seq_id":"18838993987","text":"import os\nimport argparse\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.preprocessing import normalize\n\nimport matplotlib.patches as mpatches\n\nfrom activ.nmf.viz import bases_heatmap\nfrom activ import TrackTBIFile\n\ndef get_col(m, feats, colname):\n    col = None\n    idx = np.where(feats == colname)[0]\n    if len(idx) > 0:\n        idx = idx[0]\n        col = m[:, idx]\n    return col\n\ndef add_name(func):\n    def ret(ax, col):\n        func(ax, col)\n        ax.set_title(col.name)\n    return ret\n\n\n@add_name\ndef make_barplot(ax, col):\n    vals, counts = np.unique(col, return_counts=True)\n    ax.bar([str(v) for v in vals], counts, color='gray')\n\n@add_name\ndef make_barplot_ints(ax, col):\n    vals, counts = np.unique(col, return_counts=True)\n    ax.bar([str(int(v)) for v in vals], counts, color='gray')\n\n@add_name\ndef make_age(ax, col):\n    counts, edges = np.histogram(col, bins=np.arange(1,9)*10)\n    names = [f'{b}-{e}' for b, e in zip(edges[:-1], edges[1:])]\n    ax.bar(names, counts, color='gray')\n\n@add_name\ndef make_gcs(ax, col):\n    vals, counts = np.unique(col, return_counts=True)\n    vals = vals.astype(int)\n    mask = vals >= 13\n    ax.bar(vals[mask], counts[mask], color='blue', label='Mild')\n    mask = np.logical_and(np.logical_not(mask), vals >= 9)\n    ax.bar(vals[mask], counts[mask], color='green', label='Moderate')\n    mask = np.logical_and(np.logical_not(mask), vals < 9)\n    ax.bar(vals[mask], counts[mask], color='red', label='Severe')\n    ax.set_xticks(np.arange(1,8)*2+1)\n    ax.set_xticklabels(np.arange(1,8)*2+1)\n    ax.legend()\n\n@add_name\ndef make_yes_no(ax, col):\n    vals, counts = np.unique(col, return_counts=True)\n    vals = ['No', 'Yes']\n    ax.bar(vals, counts, color='gray')\n\n\ndef make_race(ax, df):\n    ar = np.zeros(len(df), dtype='U5')\n    ar[df['RACE_3CAT_Black'] == 1] = 'Black'\n    ar[df['RACE_3CAT_White'] == 1] = 'White'\n    ar[df['RACE_3CAT_Other'] == 1] = 'Other'\n    make_barplot(ax, pd.Series(data=ar, name='Race'))\n\n@add_name\ndef make_hist(ax, col):\n    ax.hist(col, bins=40, color='gray')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('tbifile', help='the TRACK-TBI file to use', type=TrackTBIFile)\nparser.add_argument('-o', '--outdir', type=str, help='the directory to save figures to', default='.')\nparser.add_argument('-f', '--format', type=str, choices=['png', 'pdf'], help='the format to save figures in', default='pdf')\nargs = parser.parse_args()\n\ntbifile = args.tbifile\n\n\n\ndf = pd.DataFrame(data=np.concatenate([tbifile.biomarkers, tbifile.outcomes], axis=1),\n                  columns=np.concatenate([tbifile.biomarker_features, tbifile.outcome_features]),\n                  index=tbifile.patient_ids)\n\nfigure, axes = plt.subplots(2, 5, figsize=(25, 10))\nmake_age(axes[0, 0], df['Age'])\nmake_race(axes[0, 1], df)\nmake_gcs(axes[0, 2], df['admGCS'])\nmake_yes_no(axes[0, 3], df['ICPMonitorYesNo'])\nmake_yes_no(axes[0, 4], df['PMH_Psych_ANY'])\n\nmake_barplot_ints(axes[1, 0], df['GOSE_OverallScore3M'])\nmake_barplot_ints(axes[1, 1], df['RPQNausea_6mo'])\nmake_barplot_ints(axes[1, 2], df['NeuroOverallRating3mo'])\nmake_hist(axes[1, 3], df['CVLTTrial1To5StandardScore_6mo'])\nmake_yes_no(axes[1, 4], 
df['PTSD_6mo'])\n\nplt.savefig(f'{args.outdir}/tracktbi_summary.{args.format}')\n","repo_name":"BouchardLab/ML_4_prec_prognosis","sub_path":"bin/plot_summary.py","file_name":"plot_summary.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22269793050","text":"def find(arr,t):\n l=0\n r=len(arr)\n while l bool:\n if targetmatrix[-1][-1]:\n return False\n else:\n l=0\n r=len(matrix)\n # first find the row where target may be found \n while l 0:\r\n print(f\"\\r{len(aligned_scanners)} of {len(unaligned_scanners)} complete...\", end='')\r\n new_aligned_scanners = []\r\n for positions in unchecked_aligned_scanners:\r\n aligned_scanner_indices = set()\r\n for i in remaining_scanner_indices:\r\n offset = get_offset(positions, unaligned_scanners[i])\r\n if offset:\r\n aligned_scanner = [apply_offset(offset, x) for x in unaligned_scanners[i]]\r\n aligned_scanners.append(aligned_scanner)\r\n scanner_locations.append(offset[1])\r\n new_aligned_scanners.append(aligned_scanner)\r\n aligned_scanner_indices.add(i)\r\n for i in aligned_scanner_indices:\r\n remaining_scanner_indices.remove(i)\r\n unchecked_aligned_scanners = new_aligned_scanners\r\n print(\"\\r \\r\", end='')\r\n return aligned_scanners, scanner_locations\r\n\r\n\r\ndef get_offset(original_positions, new_positions):\r\n for rot_x, rot_y, rot_z in ((x, y, z) for x in range(4) for y in range(4) for z in range(4)):\r\n rotated_positions = [rotate_multiple(p, rot_x, rot_y, rot_z) for p in new_positions]\r\n for original_position in original_positions:\r\n for rotated_position in rotated_positions:\r\n offset_translation = minus_positions(original_position, rotated_position)\r\n num_matching_points = sum(1 if add_positions(rp, offset_translation) in original_positions\r\n else 0 for rp in rotated_positions)\r\n if num_matching_points >= 12:\r\n return (rot_x, rot_y, rot_z), offset_translation\r\n return None\r\n\r\n\r\ndef apply_offset(offset, p):\r\n return add_positions(rotate_multiple(p, offset[0][0], offset[0][1], offset[0][2]), offset[1])\r\n\r\n\r\ndef rot_90(p, axis):\r\n if axis == 0:\r\n return p[0], p[2], -p[1]\r\n if axis == 1:\r\n return -p[2], p[1], p[0]\r\n if axis == 2:\r\n return -p[1], p[0], p[2]\r\n raise Exception(f\"rot_90: bad axis '{axis}'\")\r\n\r\n\r\ndef rotate_multiple(p, x_rot_num, y_rot_num, z_rot_num):\r\n for _ in range(x_rot_num):\r\n p = rot_90(p, 0)\r\n for _ in range(y_rot_num):\r\n p = rot_90(p, 1)\r\n for _ in range(z_rot_num):\r\n p = rot_90(p, 2)\r\n return p\r\n\r\n\r\ndef add_positions(x, y):\r\n return x[0] + y[0], x[1] + y[1], x[2] + y[2]\r\n\r\n\r\ndef minus_positions(x, y):\r\n return x[0] - y[0], x[1] - y[1], x[2] - y[2]\r\n\r\n\r\ndef manhattan_distance(x, y):\r\n return abs(x[0] - y[0]) + abs(x[1] - y[1]) + abs(x[2] - y[2])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"PenguinInPyjamas/advent-of-code-2021","sub_path":"19/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5637159901","text":"import time\n\nletters = {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"}\nlength_mappings = {2: [1], 3: [7], 4: [4], 7: [8], 5: [2, 3, 5], 6: [0, 6, 9]}\nsection_mappings = {\n 0: {0, 1, 2, 4, 5, 6},\n 1: {2, 5},\n 2: {0, 2, 3, 4, 6},\n 3: {0, 2, 3, 5, 6},\n 4: {1, 2, 3, 5},\n 5: {0, 1, 3, 5, 6},\n 6: {0, 1, 3, 4, 5, 6},\n 7: {0, 2, 5},\n 8: {0, 1, 2, 3, 4, 5, 6},\n 9: {0, 1, 2, 3, 5, 6},\n}\n\n\ndef read_input(path):\n entries = []\n\n with open(path, \"r\") as file:\n for line in file.readlines():\n patterns, output = line.strip().split(\" | \")\n\n entries.append(\n {\"patterns\": patterns.split(\" \"), \"output\": output.split(\" \")}\n )\n\n return entries\n\n\ndef solve_silver(puzzle):\n count = 0\n for entry in puzzle:\n for value in entry[\"output\"]:\n if len(value) in {2, 3, 4, 7}:\n count += 1\n\n return count\n\n\ndef constrain(value, sections):\n length = len(value)\n for digit in length_mappings[length]:\n digit_sections = section_mappings[digit]\n tmp = [x for x in sections]\n\n for section in digit_sections:\n tmp[section] = tmp[section] & value\n\n for section in {0, 1, 2, 3, 4, 5, 6} - digit_sections:\n tmp[section] = tmp[section] - value\n\n singletons = [frozenset(x) for x in tmp if len(x) == 1]\n if set() not in tmp and len(set(singletons)) == len(singletons):\n return tmp\n\n return None\n\n\ndef solve_gold(puzzle):\n results = []\n\n for entry in puzzle:\n known_patterns = set(\n [frozenset(x) for x in entry[\"patterns\"] if len(x) in {2, 3, 4, 7}]\n )\n unknown_patterns = set(\n [frozenset(x) for x in entry[\"patterns\"] if x not in known_patterns]\n )\n\n sections = [letters for _ in range(len(letters))]\n\n for value in known_patterns:\n sections = constrain(value, sections)\n\n for value in unknown_patterns:\n sections = constrain(value, sections)\n\n digit_sections = {}\n\n for digit in section_mappings:\n digit_sections[digit] = set(\n [list(sections[section]).pop() for section in section_mappings[digit]]\n )\n\n result = \"\"\n for output in entry[\"output\"]:\n for digit in digit_sections:\n if set(output) == digit_sections[digit]:\n result += str(digit)\n\n results.append(result)\n\n return sum([int(x) for x in results])\n\n\ndef main():\n path = \"input.txt\"\n puzzle = read_input(path)\n\n start = time.time()\n silver = solve_silver(puzzle)\n print(f\"Silver: {silver}\")\n print(f\"Delta: {time.time() - start}\\n\")\n\n start = time.time()\n gold = solve_gold(puzzle)\n print(f\"Gold: {gold}\")\n print(f\"Delta: {time.time() - start}\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Kipwisp/advent-of-code-2021","sub_path":"day_8/day_8.py","file_name":"day_8.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4549119457","text":"from __future__ import unicode_literals\nimport frappe\nfrom datetime import date\nfrom frappe.defaults import get_defaults\nfrom .client import get_data\n\n\ndef sync():\n\tbiotrack_employee_list = []\n\tcompany = get_defaults().get(\"company\")\n\n\tfor biotrack_employee in get_biotrack_employees():\n\t\tsync_employee(biotrack_employee, company, biotrack_employee_list)\n\n\treturn len(biotrack_employee_list)\n\n\ndef sync_employee(biotrack_employee, company, biotrack_employee_list):\n\temployee_name = biotrack_employee.get(\"employee_name\")\n\temployee_id = biotrack_employee.get(\"employee_id\")\n\ttransactionid = biotrack_employee.get(\"transactionid\")\n\n\temployee = lookup_employee(employee_name, employee_id)\n\tif employee:\n\t\tif not (frappe.flags.force_sync or False) and employee.external_transaction_id == transactionid:\n\t\t\treturn False\n\n\telse:\n\t\temployee = frappe.get_doc({'doctype': 'Employee'})\n\n\tdate_of_birth = date(\n\t\tint(biotrack_employee.get(\"birthyear\")),\n\t\tint(biotrack_employee.get(\"birthmonth\")),\n\t\tint(biotrack_employee.get(\"birthday\"))\n\t)\n\n\tdate_of_joining = date(\n\t\tint(biotrack_employee.get(\"hireyear\")),\n\t\tint(biotrack_employee.get(\"hiremonth\")),\n\t\tint(biotrack_employee.get(\"hireday\"))\n\t)\n\n\tnaming_series = frappe.get_meta(\"Employee\").get_options(\"naming_series\") or \"EMP/\"\n\n\temployee.update({\n\t\t\"naming_series\": naming_series,\n\t\t\"employee_name\": employee_name,\n\t\t\"status\": \"Active\",\n\t\t\"external_id\": employee_id,\n\t\t\"external_transaction_id\": transactionid,\n\t\t\"company\": company,\n\t\t\"date_of_birth\": date_of_birth,\n\t\t\"date_of_joining\": date_of_joining,\n\t})\n\n\temployee.flags.ignore_mandatory = True\n\temployee.save()\n\n\tbiotrack_employee_list.append(biotrack_employee.get(\"employee_id\"))\n\tfrappe.db.commit()\n\n\ndef lookup_employee(name, external_id):\n\t\"\"\"Lookup by name or BioTrack ID\"\"\"\n\tconditions, values = frappe.db.build_conditions({\"external_id\": external_id, \"employee_name\": name})\n\tconditions = \" or \".join(conditions.split(\" and \"))\n\tresult = frappe.db.sql(\"\"\"select `name`\n\t\t\t\t\tfrom `tab%s` where %s\"\"\" % (\"Employee\", conditions), values, as_dict=True)\n\n\tif result:\n\t\treturn frappe.get_doc(\"Employee\", result[0])\n\n\treturn None\n\n\ndef get_biotrack_employees(active=1):\n\tdata = get_data('sync_employee', {'active': active})\n\n\treturn data.get('employee') if bool(data.get('success')) else []\n","repo_name":"webonyx/erpnext_biotrack","sub_path":"erpnext_biotrack/biotrackthc/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"}
+{"seq_id":"41663471828","text":"import re\nimport git\n\ndef map_changes_to_methods(repo_path, file_path):\n \"\"\"Map code changes to method names in the given file of the given Git repository.\"\"\"\n # Clone the repository and retrieve the commits that changed the file\n repo = git.Repo(repo_path)\n file_commits = list(repo.iter_commits(paths=file_path))\n\n # Define regular expressions for the beginning and end of a method\n method_start = re.compile(r'^\\s*def\\s+(\\w+)\\(.*\\):\\s*$')\n method_end = re.compile(r'^\\s*(return|\\s*$|\\s*#.*)')\n\n # Iterate over the commits and find the lines that belong to each method\n method_lines_by_commit = {}\n for commit in file_commits:\n # Get the code of the commit\n code = commit.tree[file_path].data_stream.read().decode()\n\n # Iterate over the lines of code and find the lines that belong to each method\n method_lines = []\n in_method = False\n for line in code.split('\\n'):\n if method_start.match(line):\n # This line starts a new method\n in_method = True\n method_name = method_start.search(line).group(1)\n method_lines = [line]\n elif in_method:\n # We're inside a method, so check if this line ends it\n if method_end.match(line):\n in_method = False\n method_lines_by_commit.setdefault(commit.hexsha, {}).setdefault(method_name, []).extend(method_lines)\n method_lines = []\n else:\n method_lines.append(line)\n\n # Remove the temporary repository\n # repo.close()\n # git.util.rmtree('temp_repo')\n\n return method_lines_by_commit\n","repo_name":"BhanuRutwik/hackathon","sub_path":"codeToMethodMapping.py","file_name":"codeToMethodMapping.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6022088136","text":"import re\nfrom abc import abstractmethod, ABCMeta\nfrom typing import Any\nfrom inspect import getmembers\n\nclass FHIRResource(metaclass=ABCMeta):\n def __init__(self, resource_type: str):\n self.resourceType = resource_type\n\n @abstractmethod\n def create_entry(self):\n pass\n\n def create_id(self, text: str):\n return re.sub('[^a-zA-Z0-9\\-]', '-', text)\n\n @abstractmethod\n def __getstate__(self) -> dict[str, Any]:\n json_dict = dict(\n [t for t in getmembers(self) if\n not ((t[0].startswith(\"_\") and not (t[0].startswith(\"_reserved_\"))) or callable(t[1]) or t[1] is None or (\n isinstance(t[1], list) and len(t[1]) == 0))])\n keys_to_be_rename = [key for key in json_dict if key.startswith(\"_reserved_\")]\n for key in keys_to_be_rename:\n json_dict[key[10:]] = json_dict.pop(key)\n return json_dict","repo_name":"health-tag/fhir-transformer","sub_path":"fhir_transformer/FHIR/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"69973573607","text":"#!/usr/bin/env python3\n# Code Cell 1.\n# Import the dweepy module that is a collection of functions that make it \n# easier to communicate with dweet.io \nimport dweepy\n\n# Import the GPIO modules to control the GPIO pins of the Raspberry Pi\n# Uncomment the following only when testing on a physcial Rasberry Pi\n# Comment the following when testing on a Raspbian VM\n#import RPi.GPIO as GPIO\n\n# Import the Mock GPIO modules to control the Mock GPIO pins of the Raspberry Pi\n# Uncomment the following when testing on a Raspbian VM\n# Comment the following when testing on a physcial Rasberry Pi\nimport EmulateGPIO as GPIO\n\n# Import to clear cell output with code\nfrom IPython.display import clear_output\n\n# Import the time module to control the timing of your application (e.g. add delay, etc.)\nimport time\n\nimport os\n\n_=os.system(\"clear\")\n\n# Code Cell 2.\n#Setup hardware\n# Set the desired pin numbering scheme:\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n#Create variables for the GPIO PINs the LEDs are connected to\n# ============================================\n# the PIN of the green LED\nGreenLEDPin = 12 #Add values: add the pin number for the green LED\n# the PIN of the red LED\nRedLEDPin = 13 #Add values: add the pin number for the red LED\n#=============================================\n\n# Setup the direction of the GPIO pins - either INput or OUTput \n# The PINs that connect LEDs must be set to OUTput mode:\nGPIO.setup(GreenLEDPin, GPIO.OUT)\nGPIO.setup(RedLEDPin, GPIO.OUT)\nprint()\n\n#Code Cell 4.\nwhile True:\n # Asks the user to select the LED. Put the response into a variable.\n lit = input(\"Which LED should be lit? (r)ed or (g)reen? (q) to quit: \")\n\n # convert the input to lowercase and put it in another variable.\n lit1 = lit.lower()\n\n #Set the LED state based on the user input\n if lit1 == \"r\": #If the user chose the red LED\n print(\"Activate Red LED\")\n GPIO.output(GreenLEDPin, False) # False = set 0V on the pin\n GPIO.output(RedLEDPin, True) # True = set 3.3V on the pin\n print()\n\n elif lit1 == \"g\": #If the user chose the green LED\n print(\"Activate Green LED\")\n GPIO.output(GreenLEDPin, True) # True = set 3.3V on the pin\n GPIO.output(RedLEDPin, False) #False = set 0V on the pin\n print()\n\n elif lit1 == \"q\": #If the user chose to quit the program\n print(\"Deactivate All LEDs\")\n GPIO.output(GreenLEDPin, False) # True = set 3.3V on the pin\n GPIO.output(RedLEDPin, False) #False = set 0V on the pin\n exit()\n\n else: #If the user entered something other than r, g, or q.\n print(\"Please enter r for red, g for green, or q to quit.\")\n","repo_name":"yztan120/COSTIOTProjectSourceCodes","sub_path":"Source codes/scripts (1)/docker_check_led (1).py","file_name":"docker_check_led (1).py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"594243839","text":"## paste to run here http://www.codeskulptor.org/\nimport simplegui\nimport random\nimport math\n\n#Global Variables\n \npair1 = [0,1,2,3,4,5,6,7] #first pair of cards\npair2 = [0,1,2,3,4,5,6,7] #second pair of cards\nfull_deck = [] #here we randomly add the two pair of cards, wich we then shuffle\nexposed = []\nstate = 0 #keep track of current state of the game\ncard1 = [0,0]\ncard2 = [0,0]\nmoves = 0\n\n\n# helper function to initialize globals\ndef init():\n global exposed, state, moves\n exposed = [False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False]\n state = 0 #keep track of current state of the game\n label.set_text(\"Moves = 0\")\n moves = 0\n #The program creates the deck of cards comprised of\n #the 2 original decks. It fills the full deck while\n #eliminating the elements of the 2 original decks.\n #this simplifies checking for repeated members.\n\n #elements of pair1 being inserted\n while len(pair1)!=0:\n full_deck.append( pair1.pop( random.randrange( len( pair1))))\n \n #elements of pair2 being inserted\n while len(pair2)!=0:\n full_deck.append( pair2.pop( random.randrange( len( pair2))))\n \n random.shuffle(full_deck)\n \n return \n\n \n# define event handlers\ndef mouseclick(pos):\n # add game state logic here\n #To know and pair the position of the canvas clicked and the card that is being clicked\n #we divide the pos[0] vs the number 50 and then we round down the result. that will give\n #us the index number o the card on wich we are clicking\n global exposed,state, moves, card1, card2\n \n y = math.floor( pos[0] / 50)\n \n #the program keeps track of how many cards are face up\n #and the moment when it needs to face down and up cards\n #it uses two lists in wich to store the number flipped and its position\n \n if exposed[y] == False and state == 0:\n exposed[y] = True\n card1[0] = full_deck[y]\n card1[1] = y\n state = 1\n elif exposed[y] == False and state == 1:\n exposed[y] = True \n card2[0] = full_deck[y]\n card2[1] = y\n moves+=1\n if card1[0] == card2[0]:\n exposed[card1[1]] = \"Found\"\n exposed[y] = \"Found\"\n state = 2 \n\n elif exposed[y] == \"Found\":\n state = state\n else:\n v = 0\n while v <=15: \n if exposed[v] == True and exposed[v] != \"Found\":\n exposed[v] = False\n v+=1\n \n exposed[y] = True\n state = 1\n card1[0] = full_deck[y]\n card1[1] = y\n \n label.set_text(\"Moves = \"+str(moves))\n return\n \n \n# cards are logically 50x100 pixels in size \ndef draw(canvas):\n #here the programs each member of the full_deck\n #in a nice orderly fashion through a for loop\n #if the exposed index is true, the number is shown. 
Else, the back of the card appears\n global full_deck,exposed\n positionator = 0\n index= 0\n for x in full_deck:\n if exposed[index] == True or exposed[index] == \"Found\":\n canvas.draw_text(str(x), (1+positionator, 80), 100, \"White\")\n canvas.draw_line((0+positionator,0), (0+positionator,100), 1,\"Red\")\n canvas.draw_line((50+positionator,0), (50+positionator,100), 1,\"Red\")\n positionator+=50\n elif exposed[index] == False or exposed[index] == \"Found\":\n canvas.draw_polygon([(0+positionator,0),(50+positionator,0),(50+positionator,100),(0+positionator,100)], 1, \"Red\", \"Green\")\n positionator+=50\n index+=1\n return\n\n\n# create frame and add a button and labels\nframe = simplegui.create_frame(\"Memory\", 800, 100)\nframe.add_button(\"Restart\", init)\nlabel = frame.add_label(\"Moves =0\")\n\n# initialize global variables\ninit()\n\n# register event handlers\nframe.set_mouseclick_handler(mouseclick)\nframe.set_draw_handler(draw)\n\n# get things rolling\nframe.start()","repo_name":"Cheng-F/Python-MiniProject","sub_path":"memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37356351435","text":"#!/usr/bin/python3\n\n# Licensed under AGPLv3+\n\nfrom tkinter import *\nfrom tkinter.colorchooser import askcolor\nimport tkinter.ttk as ttk\nimport threading\nimport copy\nimport time\n\nfrom lib.i18n import _\nfrom lib.config import *\n\n\n# Internally used class\nclass counter(object):\n def __init__(self, v=0):\n if type(v) != int:\n v = 0\n self.v = v\n\n def get(self):\n return self.v\n\n def inc(self):\n self.v += 1\n return self.v - 1\n\n\n# GUI configurator class\nclass guiconfigurator:\n def __init__(self, cfg):\n self.cfg = cfg\n guithread = threading.Thread(target=self.__create_window, args=())\n guithread.start()\n\n def __close_window(self):\n self.running = False\n\n # Main function of the class\n def __create_window(self):\n\n self.note = \"\" # Notice message, be filled later\n self.savedentcolor = None # Default entry background color\n self.running = True\n\n root = Tk()\n root.protocol(\"WM_DELETE_WINDOW\", self.__close_window)\n nb = ttk.Notebook(root)\n nb.pack(fill='both', expand='yes')\n\n # Create used tabs\n misctab = ttk.Frame(nb)\n limtag = ttk.Frame(nb)\n\n misctab.pack(fill=BOTH, expand=True)\n limtag.pack(fill=BOTH, expand=True)\n\n nb.add(misctab, text=_(\"Common settings\"))\n nb.add(limtag, text=_(\"Warning limits\"))\n\n #\n # Common settings\n #\n row = counter()\n Label(misctab, text=_(\"Camera number\")).grid(\n row=row.get(), column=0, sticky=W)\n self.camera_dev = Entry(misctab, width=4)\n self.camera_dev.grid(row=row.inc(), column=1, sticky=W)\n\n # Once remember default Entry background color\n self.savedentcolor = self.camera_dev.cget(\"highlightbackground\")\n\n Label(misctab, text=_(\"Video width\")).grid(\n row=row.get(), column=0, sticky=W)\n self.img_w = Entry(misctab, width=4)\n self.img_w.grid(row=row.inc(), column=1, sticky=W)\n\n Label(misctab, text=_(\"Video height\")).grid(\n row=row.get(), column=0, sticky=W)\n self.img_h = Entry(misctab, width=4)\n self.img_h.grid(row=row.inc(), column=1, sticky=W)\n\n Label(misctab, text=_(\"FPS\")).grid(row=row.get(), column=0, sticky=W)\n self.fps = Entry(misctab, width=4)\n self.fps.grid(row=row.inc(), column=1, sticky=W)\n\n self.showcap = BooleanVar()\n Checkbutton(\n misctab, text=_(\"Show picture\"), variable=self.showcap,\n onvalue=1, offvalue=0).grid(row=row.inc(), column=0, sticky=W)\n\n self.writestat = BooleanVar()\n Checkbutton(\n misctab, text=_(\"Write stat file\"), variable=self.writestat,\n onvalue=1, offvalue=0).grid(row=row.inc(), column=0, sticky=W)\n\n self.beepwarn = BooleanVar()\n Checkbutton(\n misctab, text=_(\"Beep on warning (depend on system settings)\"), variable=self.beepwarn,\n command=self.endis,\n onvalue=1, offvalue=0).grid(row=row.inc(), column=0, sticky=W)\n\n self.showwarn = BooleanVar()\n Checkbutton(\n misctab, text=_(\"Show warning\"), variable=self.showwarn,\n command=self.endis,\n onvalue=1, offvalue=0).grid(row=row.inc(), column=0, sticky=W)\n\n Label(misctab, text=_(\"Warning window size\")).grid(\n row=row.get(), column=0, sticky=W)\n self.wsize = Entry(misctab, width=4)\n self.wsize.grid(row=row.inc(), column=1, sticky=W)\n\n Label(misctab, text=_(\"Warning window X pos\")).grid(\n row=row.get(), column=0, sticky=W)\n self.wx = Entry(misctab, width=4)\n self.wx.grid(row=row.inc(), column=1, sticky=W)\n\n Label(misctab, text=_(\"Warning window Y pos\")).grid(\n row=row.get(), column=0, sticky=W)\n self.wy = Entry(misctab, width=4)\n self.wy.grid(row=row.inc(), column=1, sticky=W)\n\n Button(misctab, text=_(\"Warning 
color\"), command=self.wcolor).grid(\n row=row.inc(), column=0)\n\n self.note = Label(misctab, text=self.note)\n self.note.grid(row=row.inc(), column=0, sticky=W)\n\n Button(misctab, text=_(\"Reset\"), command=self.cfg2gui).grid(\n row=row.get(), column=0)\n Button(misctab, text=_(\"Save\"), command=self.save_guicfg).grid(\n row=row.inc(), column=1)\n\n #\n # Limits settings\n #\n row = counter()\n\n Label(limtag, text=_(\"Warning maximum emotions\")).grid(\n row=row.inc(), column=0, columnspan=8, sticky=W+E)\n\n # Checkbuttons for max limit\n self.wmax = []\n self.wmaxen = []\n column = counter()\n for i, ign in enumerate(self.cfg.wmax):\n self.wmaxen.append(None)\n self.wmaxen[i] = BooleanVar()\n Checkbutton(\n limtag, text=emotions[i], variable=self.wmaxen[i],\n onvalue=1, offvalue=0, command=self.endis).grid(\n row=row.get(), column=column.inc())\n # Scales for max limit\n row.inc()\n column = counter()\n for i, ign in enumerate(self.cfg.wmax):\n self.wmax.append(Scale(\n limtag, orient=VERTICAL, length=300,\n from_=5, to=-5, tickinterval=0.0, resolution=0.1))\n self.wmax[i].grid(row=row.get(), column=column.inc())\n\n row.inc()\n\n Label(limtag, text=_(\"Warning miminum emotions\")).grid(\n row=row.inc(), column=0, columnspan=8, sticky=W+E)\n # Checkbuttons for min limit\n self.wmin = []\n self.wminen = []\n column = counter()\n for i, ign in enumerate(self.cfg.wmin):\n self.wminen.append(None)\n self.wminen[i] = BooleanVar()\n Checkbutton(\n limtag, text=emotions[i], variable=self.wminen[i],\n onvalue=1, offvalue=0, command=self.endis).grid(\n row=row.get(), column=column.inc())\n # Scales for min limit\n row.inc()\n column = counter()\n for i, ign in enumerate(self.cfg.wmin):\n self.wmin.append(Scale(\n limtag, orient=VERTICAL, length=300, from_=5, to=-5,\n tickinterval=0.0, resolution=0.1))\n self.wmin[i].grid(row=row.get(), column=column.inc())\n\n row.inc()\n\n Button(limtag, text=_(\"Reset\"), command=self.cfg2gui).grid(\n row=row.get(), column=0)\n Button(limtag, text=_(\"Save\"), command=self.save_guicfg).grid(\n row=row.inc(), column=1)\n\n self.cfg2gui() # Fill with actual values\n\n self.endis() # Update enable/disable status of interface elementes\n\n while self.running is True:\n root.update_idletasks()\n root.update()\n time.sleep(0.05)\n\n # Scroll enabler/disabler\n def endis(self,):\n if (self.showwarn.get() is True):\n self.wsize.configure(state=NORMAL)\n self.wx.configure(state=NORMAL)\n self.wy.configure(state=NORMAL)\n else:\n self.wsize.configure(state=DISABLED)\n self.wx.configure(state=DISABLED)\n self.wy.configure(state=DISABLED)\n\n for i, ign in enumerate(self.cfg.wmax):\n if (self.wmaxen[i].get() is True):\n self.wmax[i].config(state=NORMAL, relief=RAISED)\n else:\n self.wmax[i].config(state=DISABLED, relief=FLAT)\n\n for i, ign in enumerate(self.cfg.wmin):\n if (self.wminen[i].get() is True):\n self.wmin[i].config(state=NORMAL, relief=RAISED)\n else:\n self.wmin[i].config(state=DISABLED, relief=FLAT)\n\n # Color chooser\n def wcolor(self,):\n colors = askcolor(initialcolor=self.wcolor, title=_(\"Color Chooser\"))\n self.wcolor = colors[0]\n\n # Action on save button\n def save_guicfg(self) -> None:\n errorflag, newcfg = self.get_gui_cfg()\n\n # Show warning if needed\n if errorflag == 0:\n writecfg(newcfg)\n self.cfg = newcfg\n self.note.config(text=_(\"Config saved\"), fg='green')\n\n # Update cfg structure with configuration from GUI\n def get_gui_cfg(self):\n newcfg = copy.copy(self.cfg)\n errorflag = 0\n\n # Common settings\n try:\n newcfg.camera_dev = 
int(self.camera_dev.get())\n self.camera_dev.configure(\n highlightbackground=self.savedentcolor,\n highlightcolor=self.savedentcolor)\n except Exception as e:\n self.camera_dev.configure(\n highlightbackground='red', highlightcolor='red')\n errorflag = 1\n\n try:\n newcfg.img_w = int(self.img_w.get())\n self.img_w.configure(\n highlightbackground=self.savedentcolor,\n highlightcolor=self.savedentcolor)\n except Exception as e:\n self.img_w.configure(\n highlightbackground='red', highlightcolor='red')\n errorflag = 1\n\n try:\n newcfg.img_h = int(self.img_h.get())\n self.img_h.configure(\n highlightbackground=self.savedentcolor,\n highlightcolor=self.savedentcolor)\n except Exception as e:\n self.img_h.configure(\n highlightbackground='red', highlightcolor='red')\n errorflag = 1\n\n try:\n newcfg.fps = float(self.fps.get())\n self.fps.configure(\n highlightbackground=self.savedentcolor,\n highlightcolor=self.savedentcolor)\n except Exception as e:\n self.fps.configure(highlightbackground='red', highlightcolor='red')\n errorflag = 1\n\n newcfg.showcap = self.showcap.get()\n newcfg.writestat = self.writestat.get()\n newcfg.showwarn = self.showwarn.get()\n newcfg.beepwarn = self.beepwarn.get()\n\n try:\n newcfg.wsize = int(self.wsize.get())\n self.wsize.configure(\n highlightbackground=self.savedentcolor,\n highlightcolor=self.savedentcolor)\n except Exception as e:\n self.wsize.configure(\n highlightbackground='red', highlightcolor='red')\n errorflag = 1\n\n try:\n newcfg.wx = int(self.wx.get())\n self.wx.configure(\n highlightbackground=self.savedentcolor,\n highlightcolor=self.savedentcolor)\n except Exception as e:\n self.wx.configure(\n highlightbackground='red', highlightcolor='red')\n errorflag = 1\n\n try:\n newcfg.wy = int(self.wy.get())\n self.wy.configure(\n highlightbackground=self.savedentcolor,\n highlightcolor=self.savedentcolor)\n except Exception as e:\n self.wy.configure(highlightbackground='red', highlightcolor='red')\n errorflag = 1\n\n newcfg.wcolor = self.wcolor\n\n # Emotion limits\n for i, ign in enumerate(self.cfg.wmax):\n val = self.wmaxen[i].get()\n if val == 1:\n newcfg.wmax[i] = self.wmax[i].get()\n else:\n newcfg.wmax[i] = None\n\n for i, ign in enumerate(self.cfg.wmin):\n val = self.wminen[i].get()\n if val == 1:\n newcfg.wmin[i] = self.wmin[i].get()\n else:\n newcfg.wmin[i] = None\n\n # Show warning if needed\n if errorflag == 0:\n self.note.config(text=_(\"Config validated\"), fg='green')\n else:\n self.note.config(text=_(\"Errors detected\"), fg='red')\n\n return errorflag, newcfg\n\n # Fill gui fields with the read config\n def cfg2gui(self):\n # Common settings\n self.camera_dev.delete(0, END)\n self.camera_dev.insert(0, str(self.cfg.camera_dev))\n self.img_w.delete(0, END)\n self.img_w.insert(0, str(self.cfg.img_w))\n self.img_h.delete(0, END)\n self.img_h.insert(0, str(self.cfg.img_h))\n self.fps.delete(0, END)\n self.fps.insert(0, str(self.cfg.fps))\n self.showcap.set(1 if self.cfg.showcap else 0)\n self.writestat.set(1 if self.cfg.writestat else 0)\n self.showwarn.set(1 if self.cfg.showwarn else 0)\n self.beepwarn.set(1 if self.cfg.beepwarn else 0)\n self.wsize.delete(0, END)\n self.wsize.insert(0, str(self.cfg.wsize))\n self.wx.delete(0, END)\n self.wx.insert(0, str(self.cfg.wx))\n self.wy.delete(0, END)\n self.wy.insert(0, str(self.cfg.wy))\n self.wcolor = self.cfg.wcolor\n\n # Emotion limits\n for i, val in enumerate(self.cfg.wmax):\n if val is not None:\n self.wmaxen[i].set(1)\n self.wmax[i].set(val)\n else:\n self.wmaxen[i].set(0)\n 
self.wmax[i].set(0)\n\n for i, val in enumerate(self.cfg.wmin):\n if val is not None:\n self.wminen[i].set(1)\n self.wmin[i].set(val)\n else:\n self.wminen[i].set(0)\n self.wmin[i].set(0)\n\n# vi: tabstop=4 shiftwidth=4 expandtab\n","repo_name":"ioctl-user/sevimon","sub_path":"lib/guicfg.py","file_name":"guicfg.py","file_ext":"py","file_size_in_byte":13412,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"}
+{"seq_id":"72085906729","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 21 15:52:41 2021\n\n@author: Gabri\n\"\"\"\n\nf = open('input.txt', 'r')\n\ndef str_to_list(string):\n return [c == '#' for c in string.strip()]\n\nalgo = str_to_list(f.readline())\nnext(f)\n\nimage = list(map(str_to_list, f))\nwidth, height = len(image[0]), len(image)\n\next_val = False\n\ndef neb_to_bool(x, y, val, scale = (-1,0,1)):\n return algo[int(''.join('01'[val[x+i][y+j]] for i in scale for j in scale), 2)]\n\nfor _ in range(2): #50 for part2\n mock = [[ext_val] * (width+4), [ext_val] * (width+4)]\n for r in range(height):\n mock.append([ext_val]*2 + image[r] + [ext_val] * 2)\n mock += [[ext_val] * (width+4), [ext_val] * (width+4)]\n image = []\n for c in range(1, width+3):\n image.append([neb_to_bool(c,r,mock) for r in range(1, height+3)])\n \n width += 2\n height += 2\n ext_val = algo[511 * ext_val]\n \nprint(sum(sum(l) for l in image))","repo_name":"GabrielReynes/AdventOfCode","sub_path":"2021/Day20/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13175736989","text":"#!/usr/bin/env python\n# -*- coding:utf-8 mode:python; tab-width:4; indent-tabs-mode:nil; py-indent-offset:4 -*-\n##\n\n\"\"\"\n test_remote_execution\n ~~~~~~~~~~~~~~\n\n Test job execution over ansible, using ssh transport to 127.0.0.1. Use\n NWChem and MOPAC 7 as back ends for test purposes because they are the\n best supported via Debian packages.\n\"\"\"\nimport sys\nimport unittest\nimport geoprep\nfrom tests.common_testcode import runSuite\nfrom adapters import nwchem, mopac7\n\nclass RETestCase(unittest.TestCase):\n def setUp(self):\n self.G = geoprep.Geotool()\n self.C = nwchem.NWChem()\n self.C2 = mopac7.Mopac7()\n\n def test_nwchem_energy_scf_methane(self):\n #very basic minimal basis set test for methane\n expected_energy = -39.976642\n methane = self.G.make_fragment(\"C\")\n methane.set_basis_name(\"3-21G\")\n\n #local execution\n job = self.C.make_energy_job(methane, \"hf:rhf\")\n job.run()\n self.assertAlmostEqual(expected_energy, job.energy, places=5)\n\n #over ansible\n job2 = self.C.make_energy_job(methane, \"hf:rhf\")\n job2.run(host=\"127.0.0.1\")\n self.assertAlmostEqual(expected_energy, job2.energy, places=5)\n\n def test_nwchem_bad_input_error(self):\n #job will terminate abnormally\n methane = self.G.make_fragment(\"C\")\n methane.set_basis_name(\"3-21G\")\n job = self.C.make_energy_job(methane, \"hf:rhf\")\n\n #introduce an error in the input deck: misspell rhf as thf\n job.deck = job.deck.replace(\"RHF\", \"THF\")\n\n #local execution\n self.assertEqual(\"begin\", job.runstate)\n job.run()\n self.assertEqual(\"error\", job.runstate)\n\n #over ansible\n job2 = self.C.make_energy_job(methane, \"hf:rhf\")\n job2.deck = job.deck.replace(\"RHF\", \"THF\")\n self.assertEqual(\"begin\", job2.runstate)\n job2.run(host=\"127.0.0.1\")\n self.assertEqual(\"error\", job2.runstate)\n\n def test_mopac_energy_pm3_methylium(self):\n expected_energy = -5.641481\n expected_hof = 0.408868\n \n methylium = self.G.make_system(\"[CH3+]\")\n\n #local execution\n job = self.C2.make_energy_job(methylium, \"semiempirical:pm3\")\n job.run()\n self.assertAlmostEqual(expected_energy, job.energy, places=5)\n self.assertAlmostEqual(expected_hof, job.heat_of_formation, places=5)\n\n #over ansible\n job2 = self.C2.make_energy_job(methylium, \"semiempirical:pm3\")\n job2.run(host=\"127.0.0.1\")\n self.assertAlmostEqual(expected_energy, job2.energy, places=5)\n self.assertAlmostEqual(expected_hof, job2.heat_of_formation, places=5)\n\n def test_ansible_bad_host(self):\n #try to use ansible on a badly configured host and confirm error state\n methylium = self.G.make_system(\"[CH3+]\")\n job = self.C2.make_energy_job(methylium, \"semiempirical:pm3\")\n result = job.ansible_run(\"shell\", \"ls\", \"notarealhost\")\n self.assertEqual(1, len(job.messages))\n err = job.messages[0]\n #A variety of possible error messages; assert that we got\n #at least one of them\n fails = [\"unknown error\", \"sshpass\", \"SSH Error\"]\n contains = [f in err for f in fails]\n self.assertTrue(True in contains)\n\n def test_ansible_missing_host(self):\n #try to use ansible on a non-configured host and get an exception\n methylium = self.G.make_system(\"[CH3+]\")\n job = self.C2.make_energy_job(methylium, \"semiempirical:pm3\")\n self.assertRaises(KeyError, job.ansible_run, \"shell\", \"ls\",\n \"nosuchhost\")\n\ndef runTests():\n try:\n test_name = sys.argv[1]\n \n except IndexError:\n test_name = None\n\n if test_name:\n result = runSuite(RETestCase, name = test_name)\n\n else:\n result = 
runSuite(RETestCase)\n\n return result\n\nif __name__ == '__main__':\n runTests()\n","repo_name":"mattbernst/polyhartree","sub_path":"tests/test_remote_execution.py","file_name":"test_remote_execution.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"1050672281","text":"import cv2\nimport time\nimport numpy\nimport sys\nfrom . import controller\n# Camera 0 is the integrated web cam on my netbook\ncamera_port = 0\n\n# Number of frames to throw away while the camera adjusts to light levels\nramp_frames = 30\n \n# Now we can initialize the camera capture object with the cv2.VideoCapture class.\n# All it needs is the index to a camera port.\ncamera = cv2.VideoCapture(camera_port)\n\n# constants\nBLACK_THRESHOLD = 20 # red, blue, and green value less than this\nPOINT_THRESHOLD = 50 # percentage of any given point that is black\n\n\n'''def go_straight():\n print(\"CONTINUE ON\")\n\n\ndef turn_left():\n print(\"TURN LEFT\")\n\n\ndef turn_right():\n print(\"TURN RIGHT\")'''\n\n\ndef do_the_gray_thing(im):\n for x in range(300, 310):\n for y in range(325, 335):\n im[x, y] = 200\n for y in range(400, 410):\n im[x, y] = 200\n for y in range(475, 485):\n im[x, y] = 200\n for y in range(250, 260):\n im[x, y] = 200\n for y in range(175, 185):\n im[x, y] = 200\n\n for x in range(390, 400):\n for y in range(325, 335):\n im[x, y] = 200\n for y in range(400, 410):\n im[x, y] = 200\n for y in range(475, 485):\n im[x, y] = 200\n for y in range(250, 260):\n im[x, y] = 200\n for y in range(175, 185):\n im[x, y] = 200\n return im\n\n\ndef do_the_drive_thing(im):\n # point zero is top left, nine is bottom right. figure it out you dick. it's like a book.\n #\n # p0 p1 p2 p3 p4\n #\n # p5 p6 p7 p8 p9\n #\n p0 = 0\n p1 = 0\n p2 = 0\n p3 = 0\n p4 = 0\n p5 = 0\n p6 = 0\n p7 = 0\n p8 = 0\n p9 = 0\n # top row\n for y in range(300, 310):\n # top center\n for x in range(325, 335):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p2 += 1\n # top middle right\n for x in range(400, 410):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p3 += 1\n # top middle left\n for x in range(250, 260):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p1 += 1\n # top right\n for x in range(475, 485):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p4 += 1\n # top left\n for x in range(175, 185):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p0 += 1\n\n #bottom row\n for y in range(390, 400):\n # bottom middle\n for x in range(325, 335):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p7 += 1\n # bottom middle right\n for x in range(400, 410):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p8 += 1\n # bottom middle left\n for x in range(250, 260):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p6 += 1\n # bottom right\n for x in range(475, 485):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p9 += 1\n # bottom left\n for x in range(175, 185):\n r, g, b = im[x, y]\n if r < BLACK_THRESHOLD and b < BLACK_THRESHOLD and g < BLACK_THRESHOLD:\n p5 += 1\n\n if p2 > POINT_THRESHOLD or p7 > POINT_THRESHOLD:\n go_straight()\n elif (p0 > POINT_THRESHOLD and p1 > POINT_THRESHOLD) or \\\n (p1 > POINT_THRESHOLD and p5 > POINT_THRESHOLD) or \\\n (p5 > POINT_THRESHOLD and p6 > POINT_THRESHOLD) or \\\n (p6 > POINT_THRESHOLD and p0 > POINT_THRESHOLD):\n turn_left()\n elif (p3 > POINT_THRESHOLD and p4 > POINT_THRESHOLD) or \\\n (p4 > POINT_THRESHOLD and p8 > 
POINT_THRESHOLD) or \\\n (p8 > POINT_THRESHOLD and p9 > POINT_THRESHOLD) or \\\n (p9 > POINT_THRESHOLD and p4 > POINT_THRESHOLD):\n turn_right()\n\n\n# Captures a single image from the camera and returns it in PIL format\ndef get_image():\n # read is the easiest way to get a full image out of a VideoCapture object.\n retval, im = camera.read()\n return im\n \n# Ramp the camera - these frames will be discarded and are only used to allow v4l2\n# to adjust light levels, if necessary\nfor i in xrange(ramp_frames):\n temp = get_image()\nwhile True:\n try:\n # Take the actual image we want to keep\n camera_capture = get_image()\n f = \"test_image.png\"\n \n # A nice feature of the imwrite method is that it will automatically choose the\n # correct format based on the file extension you provide. Convenient!\n # cv2.imwrite(f, camera_capture)\n \n # You'll want to release the camera, otherwise you won't be able to create a new\n # capture object until your script exits\n # del(camera)\n im = camera_capture\n print(im.shape)\n height, width, channel = im.shape\n # cv2.imshow('image color', im)\n # im2 = im\n # cv2.imshow('image', im)\n ifilter = 170\n im[im >= ifilter] = 255\n im[im < ifilter] = 0\n print(im[395,330])\n\n # do the other thing here\n do_the_drive_thing(im)\n im = do_the_gray_thing(im)\n\n # im.save(\"/home/pi/Desktop/filteredimg.jpeg\")\n from PIL import Image\n img = Image.fromarray(im)\n # path is relative because a macbook isn't a pi\n # img.save(\"filteredimg.jpeg\")\n cv2.imshow('image color removal', im)\n \n # time.sleep(5)\n key = cv2.waitKey(5)\n if key != -1:\n cv2.destroyAllWindows()\n exit(0)\n else:\n cv2.destroyAllWindows()\n\n except:\n import webbrowser\n e = sys.exc_info()[0]\n url = \"http://stackoverflow.com/search?q=[python]+\" + e\n webbrowser.open(url, 2)\n exit(0)\n","repo_name":"BenCovey/Self-Driving-Car-Capstone","sub_path":"imagecv.py","file_name":"imagecv.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36234917395","text":"from fastapi import APIRouter, status, HTTPException\nfrom schemas.schema_card import Card, UpdateCard\nfrom db.database import create_card, fetch_card_by_id, remove_card\nfrom db.repository.users import remove_user\n\nrouter = APIRouter()\n\n\n@router.post(\"/add\", status_code=status.HTTP_201_CREATED)\nasync def add_card(card: Card):\n response = await create_card(card.dict())\n if response:\n return response\n raise HTTPException(\n status_code= status.HTTP_409_CONFLICT,\n detail=\"There was an issue when adding the card to the database.\",\n )\n \n\n#@router.put(\"/update/{card_id}\", response_model=Card)\n#async def update_card(card_id: str, card: Card):\n# stored_card_data = await fetch_card_by_id(card_id)\n# mutable_card = dict(stored_card_data.pop(\"_id\"))\n# stored_card_model = Card(**mutable_card)\n# update_data = card.dict(exclude_unset=True)\n# update_card = stored_card_model.copy(update=update_data)\n \n\n@router.delete(\"/delete/{card_id}\", status_code=status.HTTP_202_ACCEPTED)\nasync def delete_card(card_id: str):\n response = await remove_card(card_id)\n if response:\n return response\n raise HTTPException(\n status_code= status.HTTP_409_CONFLICT,\n detail=f\"There was an issue when deleting the card {card_id} in the database.\",\n )\n\n@router.delete(\"/delete/{user_id}\", status_code=status.HTTP_202_ACCEPTED)\nasync def delete_card(user_id: str):\n response = await remove_user(user_id)\n if response:\n return response\n raise HTTPException(\n status_code= status.HTTP_409_CONFLICT,\n detail=f\"There was an issue when deleting the user {user_id} in the database.\",\n )","repo_name":"jacksontbailey/MyHeroAPI","sub_path":"backend/apis/internal/admin_paths.py","file_name":"admin_paths.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27967490226","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda : int(inputing())\n\n#https://www.acmicpc.net/problem/2697\n# for _ in range(one()):\n# n = list(inputing())\n# if n == sorted(n,reverse=True):\n# print(\"BIGGEST\")\n# else:\n# index = 0\n# for i in range(len(n)-1):\n# a,b = n[i],n[i+1]\n# if a and to put message error\n return form_args\n\n def form_valid(self, form):\n\n '''\n This function overrides logic of form validation\n :param form: user form output after submit (post)\n :return: raise or HTTP Response Redirect\n '''\n\n try:\n\n return super(DatasetCreateView, self).form_valid(form) # saves dataset on database, and the return is declared to self.object, that contains dataset instance created, and i can use in get_success url, because it's called after save on database, if i want i could declare return of save to another object (e.g. dataset_saved) and redirect after on form valid to a link, and can use dataset_saved with dataset created object\n except:\n raise\n\n def form_invalid(self, form):\n print(form.errors)\n form = DatasetCreationForm.DatasetCreationForm(request=self.request) # reset form --> reset errors to only appear once\n return super(DatasetCreateView, self).form_invalid(form)\n\n def get_success_url(self):\n\n '''\n This function defines the success path after form validation\n :return: reverse of a path\n '''\n\n try:\n storage = messages.get_messages(request=self.request)\n storage.used = True # clean old messages\n messages.add_message(self.request, messages.INFO, config.SUCCESS_DATASET_CREATION) # add message\n\n #pass kwargs with pk to ListSpecificDataset --> important need to kwargs because, get queryset method gets pk via kwargs, but i also can send via args, passing a tuple\n # kwargs = {\"pk\" : self.object.id}\n # path = reverse(\"datasets:ListaDatasetByID\", kwargs=kwargs)\n\n # for now, i only redirect again to list of Dataset's, but if necessary the link before redirects to detailview of the dataset created\n path = reverse('datasets:listaDatasets')\n\n return path\n except:\n raise\n","repo_name":"bundasmanu/ProjetoMestrado","sub_path":"datasets/views/DatasetCreateView.py","file_name":"DatasetCreateView.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14026062196","text":"# from mlxtend.preprocessing import TransactionEncoder\n# from mlxtend.frequent_patterns import apriori, association_rules\n# import pandas as pd\n#\n# # CSDL giao dịch\n# # transactions = [\n# # [\"A\", \"B\", \"C\"],\n# # [\"A\", \"B\"],\n# # [\"A\", \"D\", \"E\"],\n# # [\"E\", \"D\"],\n# # [\"E\", \"C\"],\n# # [\"A\", \"D\", \"E\"]\n# #\n# # ]\n#\n# transactions = [\n# [\"A\", \"B\", \"C\"],\n# [\"A\", \"B\"],\n# [\"A\", \"D\",\"E\"],\n# [\"E\", \"D\"],\n# [\"E\", \"C\"],\n# [\"A\", \"D\", \"E\"]\n# ]\n#\n# # Áp dụng thuật toán apriori để tìm tập phổ biến\n# te = TransactionEncoder()\n# te_ary = te.fit_transform(transactions)\n# df = pd.DataFrame(te_ary, columns=te.columns_)\n# frequent_sets = apriori(df, min_support=0.3, use_colnames=True)\n# print(\"Frequent sets:\")\n# print(frequent_sets)\n#\n# # Tìm các luật kết hợp từ tập phổ biến\n# rules = association_rules(frequent_sets, metric=\"confidence\", min_threshold=1)\n# print(\"Rules:\")\n# print(rules[['antecedents', 'consequents', 'confidence']])\n\nfrom itertools import combinations\n\n\ndef get_frequent_itemsets(data, minsupp):\n # Tính độ hỗ trợ tối thiểu dựa trên tổng số giao dịch và ngưỡng minsupp\n minsupp_count = len(data) * minsupp\n\n # Tạo danh sách tất cả các mục và tập hạng mục\n all_items = sorted(list(set(item for transaction in data for item in transaction)))\n itemsets = [[item] for item in all_items]\n\n # Tìm các tập phổ biến bằng thuật toán Apriori\n frequent_itemsets = []\n k = 1\n while itemsets:\n # Tính hỗ trợ cho từng tập hạng mục\n itemsets_counts = {frozenset(itemset): 0 for itemset in itemsets}\n for transaction in data:\n for itemset in itemsets_counts.keys():\n if itemset.issubset(transaction):\n itemsets_counts[itemset] += 1\n\n # Lọc ra các tập phổ biến\n frequent_itemsets.extend(\n [list(itemset) for itemset, count in itemsets_counts.items() if count >= minsupp_count])\n\n # Tạo các tập hạng mục mới kết hợp từ các tập hạng mục hiện tại\n itemsets = []\n for i, itemset in enumerate(sorted(itemsets_counts.keys())):\n for j in range(i + 1, len(itemsets_counts)):\n other_itemset = sorted(list(itemsets_counts.keys())[j])\n new_itemset = sorted(list(itemset.union(other_itemset)))\n if len(new_itemset) == k + 1 and new_itemset not in itemsets:\n itemsets.append(new_itemset)\n k += 1\n\n return frequent_itemsets\n\ndata = {\n 'T1': ['A', 'B', 'C'],\n 'T2': ['A', 'B'],\n 'T3': ['A', 'D', 'E'],\n 'T4': ['E', 'D'],\n 'T5': ['E', 'C'],\n 'T6': ['A', 'D', 'E'],\n}\n\nminsupp = 0.3\n\nfrequent_itemsets = get_frequent_itemsets(list(data.values()), minsupp)\n\nprint(frequent_itemsets)","repo_name":"Nguyen-huan/dataMining","sub_path":"bai_2/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14815891861","text":"from bs4 import BeautifulSoup as bs\nimport requests\nfrom csv import writer\n\n\nurl = 'https://www.pararius.com/apartments/amsterdam?ac=1'\npage = requests.get(url)\n \nsoup = bs(page.content, 'html.parser')\nlists = soup.find_all('section', class_='listing-search-item')\nprint(lists)\nwith open(\"/home/student/static/housing.csv\", 'w', newline='') as f:\n ghostwriter = writer(f)\n header = ['Title', 'Subtitle', 'Price', 'Area']\n ghostwriter.writerow(header)\n for list in lists:\n title = list.find('a', class_=\"listing-search-item__link--title\").text\n subtitle = list.find('div', class_='listing-search-item__sub-title').text\n price = list.find('div', class_='listing-search-item__price').text\n area = list.find('li', class_='illustrated-features__item--surface-area').text\n info = [title, subtitle, price, area]\n ghostwriter.writerow(info)\n\n#agent = {\"User-Agent\":'Mozilla/5.0 (Windows NT 6.3; WOW64) Chrome/59.0.3071.115'}\n","repo_name":"jimlabbe/mycode","sub_path":"tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7241556707","text":"def move(vec, st, en):\r\n \"\"\" move vec[en:len(vec)-1] to v[st]\"\"\"\r\n i = st\r\n while i <= st + len(vec) - 1 - en:\r\n vec[i] = vec[i + en - st]\r\n i += 1\r\n\r\n\r\ndef delete_duplicates(vec):\r\n \"\"\" vec is sorted array, we need to delete the duplicates \"\"\"\r\n\r\n idx_intervals = []\r\n\r\n i = 0\r\n nb = 0\r\n while i < len(vec):\r\n j = i + 1\r\n while j < len(vec) and vec[j] == vec[i]:\r\n j += 1\r\n\r\n if j - 1 > i:\r\n idx_intervals.append((i, j - 1))\r\n nb += (j-1-i)\r\n\r\n i = j\r\n\r\n for idx in idx_intervals[::-1]:\r\n move(vec, idx[0], idx[1])\r\n\r\n for j in range(len(vec) - 1, len(vec) - 1 - nb, -1):\r\n vec[j] = 0\r\n return len(vec) - nb\r\n\r\n\r\ndef delete_duplicates2(vec):\r\n \"\"\" vec is sorted array, we need to delete the duplicates \"\"\"\r\n\r\n i = 0\r\n nb = 0\r\n n = len(vec) - 1\r\n while i <= n:\r\n j = i + 1\r\n while j <= n and vec[j] == vec[i]:\r\n j += 1\r\n if j - 1 > i:\r\n nb += (j-1-i)\r\n move(vec, i, j-1)\r\n n -= (j-1-i)\r\n i += 1\r\n\r\n for j in range(len(vec) - 1, len(vec) - 1 - nb, -1):\r\n vec[j] = 0\r\n\r\n return len(vec) - nb\r\n\r\n\r\ndef delete_duplicates3(vec):\r\n\r\n if not vec:\r\n return 0\r\n if len(vec) == 1:\r\n return 1\r\n v = 1\r\n for i in range(len(vec)):\r\n if vec[v-1] != vec[i]:\r\n vec[v] = vec[i]\r\n v += 1\r\n return v\r\n\r\n\r\ndef main():\r\n vec = [1, 1, 1, 2, 3, 4, 4, 5, 7, 8, 8, 8, 8]\r\n #vec =[0,0]\r\n i = delete_duplicates3(vec)\r\n print(i)\r\n print(vec)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"sarath-mutnuru/EPI_python_codes","sub_path":"5.5_delete_duplicates.py","file_name":"5.5_delete_duplicates.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10729399886","text":"#!/usr/bin/env python3\n\n\nimport yaml\nimport argparse\nimport subprocess\nimport os\nimport signal\nimport time\nimport pwd\nimport jinja2\nimport codecs\nimport sys\n\n\n__author__ = \"Anoop P Alias\"\n__copyright__ = \"Copyright Anoop P Alias\"\n__license__ = \"GPL\"\n__email__ = \"anoopalias01@gmail.com\"\n\n\ninstallation_path = \"/opt/nDeploy\" # Absolute Installation Path\nbackend_config_file = installation_path+\"/conf/backends.yaml\"\n\n\n# Function defs\n\n\n# Define a function to silently remove files\ndef silentremove(filename):\n try:\n os.remove(filename)\n except OSError:\n pass\n\n\ndef control_php_fpm(trigger):\n if \"PHP\" in backend_data_yaml_parsed:\n php_backends_dict = backend_data_yaml_parsed[\"PHP\"]\n if trigger == \"autofix\":\n conf_list = os.listdir(\"/var/cpanel/users\")\n for user in conf_list:\n try:\n pwd.getpwnam(user)\n except KeyError:\n silentremove(\"/opt/nDeploy/php-fpm.d/\"+user+\".conf\")\n silentremove(\"/opt/nDeploy/secure-php-fpm.d/\"+user+\".conf\")\n if user != 'nobody' and user != 'system':\n user_home = pwd.getpwnam(user).pw_dir\n user_shell = pwd.getpwnam(user).pw_shell\n if user_shell == '/usr/local/cpanel/bin/noshell':\n print(('Please set Jailed shell for user: '+user))\n else:\n print(('VirtfsJailFix:: '+user))\n subprocess.call('su - '+user+' -c \"touch '+user_home+'/public_html\"', shell=True)\n elif trigger == \"start\":\n silentremove(\"/opt/nDeploy/php-fpm.d/nobody.conf\")\n subprocess.call(\"sysctl -q -w net.core.somaxconn=4096\", shell=True)\n subprocess.call(\"sysctl -q -w vm.max_map_count=131070\", shell=True)\n for name, path in list(php_backends_dict.items()):\n php_fpm_config = installation_path+\"/conf/\"+name\n if not os.path.isfile(php_fpm_config):\n # Initiate Jinja2 templateEnv\n templateLoader = jinja2.FileSystemLoader(installation_path + \"/conf/\")\n templateEnv = jinja2.Environment(loader=templateLoader)\n templateVars = {\"PHPNAME\": name}\n default_php_fpm_config_template = templateEnv.get_template('php-fpm.conf')\n default_php_fpm_config = default_php_fpm_config_template.render(templateVars)\n with codecs.open(php_fpm_config, 'w', 'utf-8') as default_php_fpm_config_file:\n default_php_fpm_config_file.write(default_php_fpm_config)\n if os.path.isfile(path+\"/sbin/php-fpm\"):\n php_fpm_bin = path+\"/sbin/php-fpm\"\n else:\n php_fpm_bin = path+\"/usr/sbin/php-fpm\"\n subprocess.call(php_fpm_bin+\" --prefix \"+path+\" --fpm-config \"+php_fpm_config, shell=True)\n elif trigger == \"stop\":\n for path in list(php_backends_dict.values()):\n php_fpm_pid = path+\"/var/run/php-fpm.pid\"\n if os.path.isfile(php_fpm_pid):\n with open(php_fpm_pid) as f:\n mypid = f.read()\n f.close()\n try:\n os.kill(int(mypid), signal.SIGQUIT)\n time.sleep(3) # Give enough time for all child process to exit\n except OSError:\n break\n elif trigger == \"reload\":\n conf_list = os.listdir(\"/var/cpanel/users\")\n for user in conf_list:\n try:\n pwd.getpwnam(user)\n except KeyError:\n silentremove(\"/opt/nDeploy/php-fpm.d/\"+user+\".conf\")\n silentremove(\"/opt/nDeploy/secure-php-fpm.d/\"+user+\".conf\")\n for name, path in list(php_backends_dict.items()):\n php_fpm_config = installation_path+\"/conf/\"+name\n php_fpm_pid = path+\"/var/run/php-fpm.pid\"\n if os.path.isfile(path+\"/sbin/php-fpm\"):\n php_fpm_bin = path+\"/sbin/php-fpm\"\n else:\n php_fpm_bin = path+\"/usr/sbin/php-fpm\"\n if os.path.isfile(php_fpm_pid):\n with open(php_fpm_pid) as f:\n mypid = f.read()\n try:\n os.kill(int(mypid), 
signal.SIGUSR2)\n except OSError:\n subprocess.call(php_fpm_bin+\" --prefix \"+path+\" --fpm-config \"+php_fpm_config, shell=True)\n time.sleep(3)\n try:\n with open(path + \"/var/run/php-fpm.pid\") as f:\n newpid = f.read()\n except IOError:\n subprocess.call(php_fpm_bin+\" --prefix \"+path+\" --fpm-config \"+php_fpm_config, shell=True)\n try:\n os.kill(int(newpid), 0)\n except OSError:\n subprocess.call(php_fpm_bin+\" --prefix \"+path+\" --fpm-config \"+php_fpm_config, shell=True)\n else:\n subprocess.call(php_fpm_bin+\" --prefix \"+path+\" --fpm-config \"+php_fpm_config, shell=True)\n elif trigger == \"secure-php\":\n try:\n subprocess.call(['systemctl', '--version'])\n except OSError:\n print('secure-php needs systemd . upgrade your cPanel system to CentOS7 ')\n sys.exit(1)\n else:\n for backend_name in list(php_backends_dict.keys()):\n systemd_socket_file = \"/etc/systemd/system/\"+backend_name+\"@.socket\"\n systemd_service_file = \"/etc/systemd/system/\"+backend_name+\"@.service\"\n templateLoader = jinja2.FileSystemLoader(installation_path + \"/conf/\")\n templateEnv = jinja2.Environment(loader=templateLoader)\n socket_template = templateEnv.get_template('secure-php-fpm.socket.j2')\n templateVars = {\"PHP_ROOT_PATH\": php_backends_dict.get(backend_name)}\n socket_generated_config = socket_template.render(templateVars)\n with codecs.open(systemd_socket_file, \"w\", 'utf-8') as confout:\n confout.write(socket_generated_config)\n service_template = templateEnv.get_template('secure-php-fpm.service.j2')\n service_generated_config = service_template.render(templateVars)\n with codecs.open(systemd_service_file, \"w\", 'utf-8') as confout:\n confout.write(service_generated_config)\n subprocess.call(['systemctl', 'daemon-reload'])\n print('Disabling root owned php-fpm master process:')\n subprocess.call(['systemctl', 'stop', 'ndeploy_backends.service'])\n subprocess.call(['systemctl', 'disable', 'ndeploy_backends.service'])\n if not os.path.isfile(installation_path+\"/conf/secure-php-enabled\"):\n os.mknod(installation_path+\"/conf/secure-php-enabled\")\n elif trigger == \"disable-secure-php\":\n conf_list = os.listdir(\"/opt/nDeploy/secure-php-fpm.d\")\n for filename in conf_list:\n user, extension = filename.split('.')\n for backend_name in list(php_backends_dict.keys()):\n subprocess.call(['systemctl', 'stop', backend_name+'@'+user+'.socket'])\n subprocess.call(['systemctl', 'disable', backend_name+'@'+user+'.socket'])\n subprocess.call(['killall', '-SIGKILL', 'php-fpm'])\n silentremove(installation_path+\"/conf/secure-php-enabled\")\n subprocess.call(['systemctl', 'enable', 'ndeploy_backends.service'])\n subprocess.call(['systemctl', 'restart', 'ndeploy_backends.service'])\n # Following is provided to remove legacy Apache PHPFPM selector plugin\n elif trigger == 'httpd-php-uninstall':\n silentremove('/var/cpanel/templates/apache2_4/vhost.local')\n silentremove('/var/cpanel/templates/apache2_4/ssl_vhost.local')\n silentremove(installation_path+'/conf/PHPFPM_SELECTOR_ENABLED')\n subprocess.call(['/scripts/rebuildhttpdconf'], shell=True)\n subprocess.call(['/scripts/restartsrv_httpd'], shell=True)\n elif trigger == 'jailphpfpm':\n if os.path.isfile('/opt/nDeploy/conf/secure-php-enabled'):\n print('php-fpm can be chrooted only if master process is run as root')\n print('Disable secure-php to setup chrooted php-fpm')\n else:\n if os.path.isdir('/opt/nDeploy/conf/nDeploy-cluster'):\n subprocess.call(['/usr/local/cpanel/bin/whmapi1 set_tweaksetting key=jaildefaultshell value=1'], shell=True)\n 
subprocess.call(['/usr/local/cpanel/bin/whmapi1 set_tweaksetting key=jailapache value=1'], shell=True)\n if not os.path.isdir('/var/cpanel/feature_toggles'):\n os.mkdir('/var/cpanel/feature_toggles')\n subprocess.call(['touch /var/cpanel/feature_toggles/apachefpmjail'], shell=True)\n elif trigger == 'disable-jailphpfpm':\n silentremove('/var/cpanel/feature_toggles/apachefpmjail')\n print('Chrooted PHP-FPM has been disabled.')\n else:\n return\n\n\nbackend_data_yaml = open(backend_config_file, 'r')\nbackend_data_yaml_parsed = yaml.safe_load(backend_data_yaml)\nbackend_data_yaml.close()\n\n\nparser = argparse.ArgumentParser(description=\"Start/Stop various nDeploy backends\")\nparser.add_argument(\"control_command\")\nargs = parser.parse_args()\ntrigger = args.control_command\ncontrol_php_fpm(trigger)\n","repo_name":"AnoopAlias/AUTOM8N","sub_path":"scripts/init_backends.py","file_name":"init_backends.py","file_ext":"py","file_size_in_byte":9948,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"53"}
+{"seq_id":"73274044969","text":"import datetime\nimport os\nimport sqlite3\nimport sys\n\nimport pandas as pd\nimport pyvibe as pv\nfrom flask import Flask\n\napp = Flask(import_name=__name__)\napp.config[\"CACHE_TYPE\"] = \"null\"\napp.config[\"SEND_FILE_MAX_AGE_DEFAULT\"] = datetime.timedelta(seconds=1)\napp.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(minutes=10)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n page = pv.Page()\n page.add_header(\"NBA WEST rank board\")\n\n data = get_rank_data_from_db(west=True)\n columns = ['rank', 'team', 'win', 'loss']\n\n df = pd.DataFrame(data, columns=columns)\n page.add_pandastable(df)\n return page.to_html()\n\n\n@app.route('/east', methods=['GET', 'POST'])\ndef east_rank():\n page = pv.Page()\n page.add_header(\"NBA EAST rank board\")\n page.add\n\n data = get_rank_data_from_db(west=False)\n columns = ['rank', 'team', 'win', 'loss']\n\n df = pd.DataFrame(data, columns=columns)\n page.add_pandastable(df)\n return page.to_html()\n\n\n@app.route('/pics', methods=['GET', 'POST'])\ndef pics():\n page = pv.Page()\n page.add_header(\"Pictures from AI\")\n cc = page.add_container(grid_columns=3)\n card1 = cc.add_card()\n card2 = cc.add_card()\n card3 = cc.add_card()\n card4 = cc.add_card()\n card5 = cc.add_card()\n card6 = cc.add_card()\n\n card1.add_image(url='https://raw.githubusercontent.com/leslieyuan/leslieyuan.github.io/master/pics/cat_on_wall.png',\n alt='cat on wall')\n card2.add_image(url='https://github.com/leslieyuan/leslieyuan.github.io/blob/master/pics/dolphin_1.png?raw=true',\n alt='dolphin')\n card3.add_image(url='https://github.com/leslieyuan/leslieyuan.github.io/blob/master/pics/dolphin_2.png?raw=true',\n alt='dolphin')\n card4.add_image(url='https://github.com/leslieyuan/leslieyuan.github.io/blob/master/pics/cat_1.png?raw=true',\n alt='cat')\n card5.add_image(url='https://github.com/leslieyuan/leslieyuan.github.io/blob/master/pics/dolphin_3.png?raw=true',\n alt='dolphin')\n card6.add_image(url='https://github.com/leslieyuan/leslieyuan.github.io/blob/master/pics/woman_1.png?raw=true',\n alt='woman')\n\n return page.to_html()\n\n\ndef get_rank_data_from_db(west=True):\n data = []\n connection, cursor = None, None\n try:\n connection = sqlite3.connect('./service/rank.db')\n cursor = connection.cursor()\n if west:\n cursor.execute('SELECT rank, team, win, loss FROM t_nba_rank WHERE east_west=1')\n else:\n cursor.execute('SELECT rank, team, win, loss FROM t_nba_rank WHERE east_west=0')\n for rank, team, win, loss in cursor.fetchall():\n data.append([rank, team, win, loss])\n except Exception as ex:\n print(ex)\n finally:\n try:\n cursor.close()\n connection.close()\n except:\n pass\n\n return data\n\n\ndef daemonize(b_stat):\n if not b_stat:\n return\n try:\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"fork #1 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n os.chdir(\"/\")\n os.setsid()\n os.umask(0)\n\n # second fork\n try:\n pid = os.fork()\n if pid > 0:\n # exit from second parent\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"fork #2 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n # redirect standard file descriptors\n sys.stdout.flush()\n sys.stderr.flush()\n\n\nif __name__ == '__main__':\n # run\n #daemonize(True)\n app.run(host='0.0.0.0', port=31758, 
debug=False)\n","repo_name":"leslieyuan/nba_dashboard","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26938306879","text":"from django.db import models\nfrom django.utils import timezone\nfrom ckeditor.fields import RichTextField\n\nSUBJECTS = (\n\t('TLE','TLE'),\n\t('MATH','Mathematics'),\n\t('ENGLISH','ENGLISH'),\n\t('FILIPINO','FILIPINO'),\n\t('MAPEH','MAPEH'),\n\t('SCIENCE','SCIENCE'),\n\t('ESP','ESP'),\n\t('AP','Araling Panlipunan')\n)\n\ndef x_ago_helper(diff):\n\tif diff.days > 0:\n\t\treturn f'{diff.days} days ago'\n\tif diff.seconds == 0 or diff.seconds == 1:\n\t\treturn 'Just Now'\n\tif diff.seconds < 60:\n\t\treturn f'{diff.seconds} seconds ago'\n\tif diff.seconds < 3600:\n\t\treturn f'{diff.seconds // 60} minutes ago'\n\treturn f'{diff.seconds // 3600} hours ago'\n\n\nclass Task(models.Model):\n\ttitle = models.CharField(max_length=200)\n\tdesc = RichTextField(blank=True, null=True)\n\tsummary = RichTextField(blank=True, null=True)\n\tarchived = models.BooleanField(default=False)\n\tsubject = models.CharField(choices=SUBJECTS, max_length=50, default='TLE')\n\tweek_num = models.IntegerField()\n\tcreated = models.DateTimeField(editable=False)\n\tmodified = models.DateTimeField()\n\n\tdef x_ago(self):\n\t\tdiff = timezone.now() - self.created\n\t\treturn x_ago_helper(diff)\n\n\tdef save(self, *args, **kwargs):\n\t\tif not self.id:\n\t\t\tself.created = timezone.now()\n\t\tself.modified = timezone.now()\n\t\treturn super(Task, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\treturn self.title\n\nclass TaskNote(models.Model):\n\ttask = models.ForeignKey(Task, on_delete=models.CASCADE)\n\tnote = models.TextField()\n\tcreated = models.DateTimeField(editable=False)\n\tmodified = models.DateTimeField()\n\thidden = models.BooleanField(default=False)\n\n\tdef x_ago(self):\n\t\tdiff = timezone.now() - self.created\n\t\treturn x_ago_helper(diff)\n\n\tdef save(self, *args, **kwargs):\n\t\tif not self.id:\n\t\t\tself.created = timezone.now()\n\t\tself.modified = timezone.now()\n\t\treturn super(TaskNote, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\treturn self.note\n\t\t","repo_name":"jabezborja/To-Do-Modules","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"41083601981","text":"from glob import glob\nfrom os import path\nfrom pandas import concat, DataFrame\nfrom .measure import get_df_properties, measure_segmentation_iou, show_props\nfrom .segmentation import Segment\n\n\ndef get_segmentations(origs: list[str],\n save_dir: str = 'data/exports/all',\n plot: bool = False) -> None:\n ''' Finds all input files and executes four segmentation methods and\n automatically saves then to save_dir\n Args\n input_dir (str) pathlike, the directory that contains the images to be\n segmented\n filetype (str) jpg or png, are the filetype of the pictures on the\n input_dir.\n save_dir (str) pathlike, the direcotry the segmented images will be\n saved\n plot (bool) if True the images are plotted side by side with their\n original instead of being saved as a single image\n '''\n print('Initializing Segmentation', flush=True)\n print(f'Found {len(origs)} to segment', flush=True)\n for i, img in enumerate(origs):\n print(f'Segmenting {i}th Image: {img}\\n', flush=True)\n segment = Segment(img=img, plot=plot, dir=save_dir)\n segment.isodata()\n segment.canny()\n segment.flood_fill()\n segment.iterative_cluster()\n segment.chanvese()\n segment.sauvola()\n del segment\n\n\ndef get_metrics_df(\n imgs: list[str],\n csv_fname: str = 'metrics.csv') -> DataFrame:\n ''' Builds a pandas dataframe with the metrics axis_major_length,\n axis_minor_length and area of all images on imgs using\n skimage.measure.regionprops_table, saves these metrics to a csv file and\n returns the dataframe '''\n names: dict[str, list[str]] = {\n 'name': [path.basename(img) for img in imgs]}\n df: DataFrame = get_df_properties(imgs)\n # Adding names to df\n df = concat([DataFrame(names), df], axis=1)\n df.sort_values(by='name', inplace=True, ignore_index=True)\n df.to_csv(csv_fname)\n return df\n\n\ndef _measure_segmentation(\n imgs_true: list[str],\n tests: list[tuple[str, ...]],\n names: list[str],\n csv_fname: str = 'csvs/compared_precision.csv') -> DataFrame:\n ''' Calculates the precision of the segmentation of tests compared to the\n gold standards imgs_true using skimage.metrics.adapted_rand_error,\n encapsulates these metrics into a pandas dataframe, saves it to a csv file\n and returns it '''\n df: DataFrame = measure_segmentation_iou(imgs_true, tests, names)\n df.to_csv(csv_fname)\n print(df)\n print()\n print(df.sum())\n return df\n\n\ndef get_tests_measures(\n trues: list[str],\n tests_dir: list[str] = ['data/exports/gold/isodata/*.png',\n 'data/exports/gold/canny/*.png',\n 'data/exports/gold/flood_fill/*.png'],\n seg_names: list[str] = ['isodata', 'canny', 'flood_fill']) -> None:\n ''' Measures the best segmentation by calculating their precision, storing\n all the results in a dataframe and summing every column to see which has\n the higher value.\n Important:\n Make sure all images in tests_dir sort in the same order or\n _measure_segmentation will raise an error if the image files don't\n match their shape\n If the images have the same shape, it won't raise an error, but the\n precision will be completelly off\n '''\n assert len(tests_dir) == len(seg_names), \\\n 'tests_dir and seg_names lengths must match'\n tests_images: list[list[str]]\n tests_images = [sorted(glob(test_dir)) for test_dir in tests_dir]\n # zipping all segmentation of the same original -> [(01, 01),(02, 02)]\n _measure_segmentation(trues, list(zip(*tests_images)), seg_names)\n\n\ndef metrics_n_plot(directory: str = 'data/exports/all/*.png') -> None:\n ''' Quick and dirty function that saves the 
dataframe with the segmented\n images metrics and plots their images with bounding box and major and minor\n lengths '''\n segs: list[str] = glob(directory)\n get_metrics_df(segs)\n # Loops through segs, loads each image and calculates it's properties\n # axis_major_length, axis_minor_length, area, centroid and boundingbox\n # using skimage.measure.regionprops and plots these informations on the\n # original segmented image using matplotlib.pyplot, it then saves this plot\n # as a new image\n for img in segs:\n show_props(img)\n\n\ndef util(true_dir: str = 'data/gold/*.jpg',\n origs_dir: str = 'data/input/*.jpg',\n **kwargs) -> None:\n '''\n kwargs\n segment (bool) if util must segment the original images\n measure (bool) if util must calculate the segmentation precision\n '''\n trues: list[str] = sorted(glob(true_dir))\n origs: list[str]= sorted(glob(origs_dir))\n\n if kwargs.get('segment'):\n get_segmentations(origs, plot=True)\n if kwargs.get('measure'):\n get_tests_measures(trues)\n else:\n metrics_n_plot()\n","repo_name":"duartqx/AtlanticoCompCogn","sub_path":"projetos/PDI/src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37306314808","text":"# Author: Deepak Kumar Singh\n# Description: Car Manager class\n# Date Created: 09/01/2022\n# Date Modified: 09/01/2022\n\nimport random\nfrom turtle import Turtle\nimport time\n\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager(Turtle):\n def __init__(self, num_of_cars):\n super().__init__()\n self.carlist = []\n self.ypos = 240\n self.timer = 0.1\n self.new_x = 0\n self.new_y = 0\n self.create(num_of_cars)\n\n def create(self, num_of_cars):\n for car in range(num_of_cars):\n seg = Turtle(\"square\")\n #self.shape(\"square\")\n seg.penup()\n seg.shapesize(stretch_wid=1, stretch_len=2)\n seg.color(random.choice(COLORS))\n seg.goto(260, self.ypos)\n self.ypos -= 90\n self.carlist.append(seg)\n\n def move(self):\n for car in range(0, len(self.carlist)):\n time.sleep(self.timer)\n self.new_x = self.carlist[car].xcor() - (STARTING_MOVE_DISTANCE*random.randint(1, 10))\n self.new_y = self.carlist[car].ycor()\n self.carlist[car].goto(self.new_x, self.new_y)\n\n def reset_position(self, obj):\n ypos2 = obj.ycor()\n obj.goto(240, ypos2)\n\n def level_up(self):\n self.new_x -= MOVE_INCREMENT # needs some improvement here.\n # self.timer = 0.05\n\n\n\n","repo_name":"iamdeepaksingh/100DaysofCode","sub_path":"Day23/car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71423908969","text":"# 问题分析:兔子有寿命,新生兔子1个月成熟,第三个月生下一对兔子\r\n# 首先初始化一个列表list,用于存储每个月的兔子数,值的依次顺序为���存N个月的兔子数\r\n# 可以发现,第1个月时,首月1对兔子,后面全是0,因此\r\n# 第1个月为:[1,0,0,0,0,0,...]\r\n# 第2个月为:[0,1,0,0,0,0,...]\r\n# 第3个月为:[1,0,1,0,0,0,...]\r\n# 第4个月为:[1,1,0,1,0,0,...]\r\n# 第5个月为:[2,1,1,0,1,0,...]\r\n# 总结规律:新生兔子(即list[0])数量为后面所有值的和,更新月份时,列表数字“往右移一格”\r\n\r\nimport sys\r\n\r\nMonthN, SurvivalM = int(sys.argv[1]), int(sys.argv[2])\r\n\r\nif SurvivalM == 1:\r\n print(\"The survival time is 1 month, the rabbit has not reproduced and is extinct!\")\r\n exit()\r\n\r\n# 初始化数组\r\nl = []\r\nfor i in range(0, SurvivalM):\r\n l.append(0)\r\n\r\ndef MortalFibRabbits(li, n):\r\n if n == 1:\r\n li[0] = 1\r\n return(li, 1)\r\n else:\r\n tmp = MortalFibRabbits(li, n-1)\r\n li = tmp[0]\r\n li[-1] = sum(li[1:])\r\n li = li[-1:] + li[:-1]\r\n # print(li)\r\n return(li, tmp[1])\r\n\r\nx = MortalFibRabbits(l, MonthN)\r\n\r\nprint(sum(x[0]))\r\n","repo_name":"AlsoATraveler/RosalindRecord","sub_path":"MortalFibonacciRabbits/MortalFibonacciRabbits.py","file_name":"MortalFibonacciRabbits.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9930225051","text":"import json\nimport os\n\nimport boto3\nimport pytest\n\nfrom lariat_agents.agent.athena.athena_agent import AthenaAgent\nfrom lariat_agents.base.base_agent import BaseAgent\nfrom lariat_agents.base.tests.data.test_cases import tests\nfrom lariat_python_common.test.utils import data_loader, get_test_labels\nfrom moto import mock_athena, mock_s3\nfrom lariat_agents.constants import CLOUD_AGENT_CONFIG_PATH\n\n\n@pytest.fixture\ndef aws_credentials():\n \"\"\"Mocked AWS Credentials for moto.\"\"\"\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"test_lariat\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"test_lariat\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"test_lariat\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"test_lariat\"\n\n\n@mock_athena\n@mock_s3\n@pytest.mark.parametrize(\n \"indicators, expect\",\n data_loader(\n tests[\"execute_indicators\"],\n [\n \"indicators\",\n \"expect\",\n ],\n ),\n ids=get_test_labels(tests[\"execute_indicators\"]),\n)\ndef test_execute_indicators(aws_credentials, indicators, expect):\n \"\"\"\n This tests the execute_indicators functionality in BaseAgent.\n \"\"\"\n test_file_data = json.dumps({\"source_id\": \"source_id1\"}).encode()\n athena = boto3.client(\"athena\", region_name=\"us-east-1\")\n s3 = boto3.client(\"s3\", region_name=\"us-east-1\")\n test_bucket_name = CLOUD_AGENT_CONFIG_PATH.split(\"/\")[0]\n test_cloud_config_path = CLOUD_AGENT_CONFIG_PATH[CLOUD_AGENT_CONFIG_PATH.index(\"/\")+1:]\n s3.create_bucket(Bucket=test_bucket_name)\n s3.put_object(\n Bucket=test_bucket_name,\n Key=test_cloud_config_path,\n Body=test_file_data,\n )\n agent = AthenaAgent(\n agent_type=\"athena\", cloud=\"aws\", athena_handler=athena, s3_handler=s3\n )\n response = agent.execute_indicators(indicators=indicators, expect_results=True)\n assert response == expect\n\n\n@pytest.mark.parametrize(\n \"calculation, expect\",\n data_loader(\n tests[\"get_sketch_type_from_calculation\"],\n [\n \"calculation\",\n \"expect\",\n ],\n ),\n ids=get_test_labels(tests[\"get_sketch_type_from_calculation\"]),\n)\ndef test_get_sketch_type_from_calculation(calculation, expect):\n \"\"\"\n This tests the get_sketch_type_from_calculation functionality in BaseAgent.\n \"\"\"\n response = BaseAgent.get_sketch_type_from_calculation(calculation)\n assert response == expect\n","repo_name":"lariat-data/lariat-agents","sub_path":"lariat_agents/base/tests/test_base_agent.py","file_name":"test_base_agent.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"40301656706","text":"\"\"\" A classifier model which wraps around a backbone. This setup allows for easy\ninterchangeability during experimentation and a reliable way to load saved models. \"\"\"\n\nimport yaml\n\nimport torch\n\nfrom core import pull_assets\nfrom third_party.models import resnet\nfrom third_party.efficientdet import efficientnet\nfrom third_party.vovnet import vovnet\n\n\nclass Classifier(torch.nn.Module):\n def __init__(\n self,\n img_width: int,\n img_height: int,\n num_classes: int = 2,\n version: str = None,\n backbone: str = None,\n use_cuda: bool = False,\n half_precision: bool = False,\n ) -> None:\n \"\"\"\n Args:\n img_width: The width of the input images.\n img_height: The height of the input images.\n num_classes: The number of classes to predict.\n version: The version of the model to download from bintray.\n backbone: A string designating which model to load.\n use_cuda: Wether this model is going to be used on gpu.\n half_precision: Wether to use half precision for inference. For now\n half_precision doesn't work well with training. Maybe in PyTorch 1.6.0.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.use_cuda = use_cuda\n self.half_precision = half_precision\n if backbone is None and version is None:\n raise ValueError(\"Must supply either model version or backbone to load\")\n\n # If a version is given, download from bintray\n if version is not None:\n # Download the model. This has the yaml containing the backbone.\n model_path = pull_assets.download_model(\n model_type=\"classifier\", version=version\n )\n # Load the config in the package to determine the backbone\n config = yaml.safe_load((model_path / \"config.yaml\").read_text())\n backbone = config.get(\"model\", {}).get(\"backbone\", None)\n # Construct the model, then load the state\n self.model = self._load_backbone(backbone)\n self.load_state_dict(\n torch.load(model_path / \"classifier.pt\", map_location=\"cpu\")\n )\n else:\n # If no version supplied, just load the backbone\n self.model = self._load_backbone(backbone)\n\n self.model.eval()\n if self.use_cuda and self.half_precision:\n self.model.cuda()\n self.model.half()\n\n def __call__(self, x: torch.Tensor) -> torch.Tensor:\n # If using cuda and not training, assume inference.\n if self.use_cuda and self.half_precision:\n x = x.half()\n return self.model(x)\n\n def _load_backbone(self, backbone: str) -> torch.nn.Module:\n \"\"\" Load the supplied backbone. \"\"\"\n if backbone in efficientnet._MODEL_SCALES:\n model = efficientnet.EfficientNet(\n backbone=backbone, num_classes=self.num_classes\n )\n elif backbone == \"resnet18\":\n model = resnet.resnet18(num_classes=self.num_classes)\n elif \"vovnet\" in backbone:\n model = vovnet.VoVNet(backbone, num_classes=self.num_classes)\n else:\n raise ValueError(f\"Unsupported backbone {backbone}.\")\n\n return model\n\n def classify(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\" Take in an image batch and return the class for each image. \"\"\"\n if self.use_cuda and self.half_precision:\n x = x.half()\n _, predicted = torch.max(self.model(x).data, 1)\n return predicted\n","repo_name":"alexwitt23/uav-austin","sub_path":"core/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14349951581","text":"import cv2\nimport numpy as np\nimport os\n\ndef detectFaceOpenCVDnn(net, frame, framework=\"caffe\", conf_threshold=0.7):\n frameOpencvDnn = frame.copy()\n frameHeight = frameOpencvDnn.shape[0]\n frameWidth = frameOpencvDnn.shape[1]\n if framework == \"caffe\":\n blob = cv2.dnn.blobFromImage(\n frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], False, False,\n )\n else:\n blob = cv2.dnn.blobFromImage(\n frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False,\n )\n\n net.setInput(blob)\n detections = net.forward()\n bboxes = []\n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence > conf_threshold:\n x1 = int(detections[0, 0, i, 3] * frameWidth)\n y1 = int(detections[0, 0, i, 4] * frameHeight)\n x2 = int(detections[0, 0, i, 5] * frameWidth)\n y2 = int(detections[0, 0, i, 6] * frameHeight)\n bboxes.append([x1, y1, x2, y2])\n cv2.rectangle(\n frameOpencvDnn,\n (x1, y1),\n (x2, y2),\n (0, 255, 0),\n int(round(frameHeight / 150)),\n 8,\n )\n return frameOpencvDnn, bboxes\n \ndef getKeyPoints(frame):\n\n #Import Pose Detection Models\n protoFile = os.path.join(\"models\", \"pose\", \"pose_deploy_linevec.prototxt\")\n weightsFile = os.path.join(\"models\", \"pose\", \"pose_iter_440000.caffemodel\")\n nPoints = 18\n POSE_PAIRS = [ [1,0],[1,2],[1,5],[2,3],[3,4],[5,6],[6,7],[1,8],[8,9],[9,10],[1,11],[11,12],[12,13],[0,14],[0,15],[14,16],[15,17]]\n net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n net.setPreferableBackend(cv2.dnn.DNN_TARGET_CPU)\n\n # Import Face Detection Models\n modelFile = os.path.join(\"models\", \"facedetect\", \"res10_300x300_ssd_iter_140000_fp16.caffemodel\")\n configFile = os.path.join(\"models\", \"facedetect\", \"deploy.prototxt\")\n net2 = cv2.dnn.readNetFromCaffe(configFile, modelFile)\n\n frameSkeleton = np.copy(frame)\n frameKeyPoints = np.copy(frame)\n facialDetectFrame = np.copy(frame)\n frameWidth = frame.shape[1]\n frameHeight = frame.shape[0]\n threshold = 0.1\n\n outOpencvDnn, bboxes = detectFaceOpenCVDnn(net2, facialDetectFrame)\n #cv2.imwrite('face-detection.jpg', outOpencvDnn)\n\n # input image dimensions for the network\n inWidth = 368\n inHeight = 368\n inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n\n net.setInput(inpBlob)\n\n output = net.forward()\n\n H = output.shape[2]\n W = output.shape[3]\n\n # Empty list to store the detected keypoints\n points = []\n\n for i in range(nPoints):\n # confidence map of corresponding body's part.\n probMap = output[0, i, :, :]\n\n # Find global maxima of the probMap.\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n \n # Scale the point to fit on the original image\n x = (frameWidth * point[0]) / W\n y = (frameHeight * point[1]) / H\n\n if prob > threshold : \n cv2.circle(frameKeyPoints, (int(x), int(y)), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.putText(frameKeyPoints, \"{}\".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n\n # Add the point to the list if the probability is greater than the threshold\n points.append((int(x), int(y)))\n else :\n points.append(None)\n\n # Draw Skeleton\n for pair in POSE_PAIRS:\n partA = pair[0]\n partB = pair[1]\n\n if points[partA] and points[partB]:\n cv2.line(frameSkeleton, points[partA], points[partB], (0, 255, 255), 2)\n cv2.circle(frameSkeleton, points[partA], 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)\n\n #cv2.imwrite('Output-Keypoints.jpg', 
frameKeyPoints)\n #cv2.imwrite('Output-Skeleton.jpg', frameSkeleton)\n\n for i in range(len(points)):\n if points[i] == None:\n points[i] = (-1, -1)\n\n keypoints = {\n 'Bounding Box' : [bboxes[0][3], bboxes[0][2]],\n 'Neck' : [points[1][0], points[1][1]],\n 'Nose' : [points[0][0], points[0][1]],\n 'Left Eye' : [points[14][0], points[14][1]],\n 'Right Eye' : [points[15][0], points[15][1]],\n 'Left Ear' : [points[16][0], points[16][1]],\n 'Right Ear' : [points[17][0], points[17][1]],\n 'Left Shoulder' : [points[2][0], points[2][1]],\n 'Right Shoulder' : [points[5][0], points[5][1]]\n }\n return keypoints\n\ndef overall_score(proximity, slump, front_tilt, head_tilt, shoulder_tilt, shoulder_width):\n score = np.abs((proximity[-1]-proximity[0])/proximity[0])\n score += np.abs((slump[-1]-slump[0])/slump[0])\n score += np.abs((front_tilt[-1]-front_tilt[0])/front_tilt[0])\n score += np.abs((head_tilt[-1]-head_tilt[0])/head_tilt[0])\n score += np.abs((shoulder_tilt[-1]-shoulder_tilt[0])/shoulder_tilt[0])\n score += np.abs((shoulder_width[-1]-shoulder_width[0])/shoulder_width[0])\n return score\n\nclass CheckPosture:\n\n def __init__(self, scale, key_points={}, baseline_points={}, sensitivity={}):\n self.key_points = key_points\n self.baseline_points = baseline_points\n self.sensitivity = sensitivity\n self.scale = scale\n self.message = \"\"\n\n def set_key_points(self, key_points):\n self.key_points = key_points\n\n def set_baseline(self, baseline_points):\n self.baseline_points = baseline_points\n \n def set_sensitivity(self, sensitivity):\n self.sensitivity = sensitivity\n\n def build_message(self):\n message = \"\"\n if self.check_proximity()[0]:\n message += \"You're too close to the computer screen - try to lean back!!\"\n if self.check_slump()[0]:\n message += \"Sit up in your chair, you're slumping!\"\n if self.check_forward_tilt()[0]:\n message += \"Lift up your head!\"\n if self.check_head_tilt()[0]:\n if self.check_head_tilt()[1] < 0:\n message += \"You're leaning your head to the right! Don't put too much pressure on one side!\"\n else:\n message += \"You're leaning your head to the left! Don't put too much pressure on one side!\"\n if self.check_shoulder_tilt()[0]:\n if self.check_shoulder_tilt()[1] < 0:\n message += \"Your left shoulder is higher than your right shoulder! Don't put too much pressure on one side!\"\n else:\n message += \"Your right shoulder is higher than your left shoulder! Don't put too much pressure on one side!\"\n if self.check_shoulder_width()[0]:\n message += \"You're curving your shoulders inwards. 
Try to sit with them further back.\"\n \n self.message = message\n return message\n\n def get_message(self):\n return self.message\n\n def set_scale(self, scale):\n self.scale = scale\n\n def get_scale(self):\n return self.scale\n\n def get_distance(self):\n if self.key_points['Bounding Box'][0] != -1 and self.key_points['Bounding Box'][1] != -1:\n v = np.sqrt((self.key_points['Bounding Box'][0] * self.key_points['Bounding Box'][1])/(self.baseline_points['Bounding Box'][0] * self.baseline_points['Bounding Box'][1]))\n return v\n return 1\n\n def check_proximity(self):\n p1 = self.sensitivity['Proximity'][0]\n p2 = self.sensitivity['Proximity'][1]\n if self.key_points['Bounding Box'][0] != -1 and self.key_points['Bounding Box'][1] != -1:\n v = np.sqrt((self.key_points['Bounding Box'][0] * self.key_points['Bounding Box'][1])/(self.baseline_points['Bounding Box'][0] * self.baseline_points['Bounding Box'][1]))\n if v > p1 or v < p2:\n return [True, v]\n return [False, v]\n\n def check_slump(self):\n p1 = self.sensitivity['Slump'][0]\n b = self.baseline_points['Nose'][1] - self.baseline_points['Neck'][1]\n if self.key_points['Neck'][1] != -1 and self.key_points['Nose'][1] != -1:\n v = self.key_points['Nose'][1] - self.key_points['Neck'][1]\n if (v - b)/self.get_distance() > p1:\n return [True, (v - b)/self.get_distance()]\n return [False, (v - b)/self.get_distance()]\n return [False, b]\n\n def check_forward_tilt(self):\n p1 = self.sensitivity['Forward Tilt'][0]\n b = self.baseline_points['Left Eye'][1] - self.baseline_points['Left Ear'][1]\n if self.key_points['Left Eye'][1] != -1 and self.key_points['Left Ear'][1] != -1:\n v = self.key_points['Left Eye'][1] - self.key_points['Left Ear'][1]\n if (v - b)/self.get_distance() > p1:\n return [True, (v - b)/self.get_distance()]\n return [False, (v - b)/self.get_distance()]\n b = self.baseline_points['Right Eye'][1] - self.baseline_points['Right Ear'][1]\n if self.key_points['Right Eye'][1] != -1 and self.key_points['Right Ear'][1] != -1:\n v = self.key_points['Right Eye'][1] - self.key_points['Right Ear'][1]\n if (v - b)/self.get_distance() > p1:\n return [True, (v - b)/self.get_distance()]\n return [False, (v - b)/self.get_distance()]\n return [False, b]\n\n def check_head_tilt(self):\n p1 = self.sensitivity['Head Tilt'][0]\n b = self.baseline_points['Right Eye'][1] - self.baseline_points['Left Eye'][1]\n if self.key_points['Right Eye'][1] != -1 and self.key_points['Left Eye'][1] != -1:\n v = self.key_points['Right Eye'][1] - self.key_points['Left Eye'][1]\n if np.abs((v - b)/self.get_distance()) > p1:\n return [True, (v - b)/self.get_distance()]\n return [False, (v - b)/self.get_distance()]\n return [False, b]\n\n def check_shoulder_tilt(self):\n p1 = self.sensitivity['Shoulder Tilt'][0]\n b = self.baseline_points['Right Shoulder'][1] - self.baseline_points['Left Shoulder'][1]\n if self.key_points['Right Shoulder'][1] != -1 and self.key_points['Left Shoulder'][1] != -1:\n v = self.key_points['Right Shoulder'][1] - self.key_points['Left Shoulder'][1]\n if np.abs((v - b)/self.get_distance()) > p1:\n return [True, (v - b)/self.get_distance()]\n return [False, (v - b)/self.get_distance()]\n return [False, b]\n \n def check_shoulder_width(self):\n p1 = self.sensitivity['Shoulder Width'][0]\n b = self.baseline_points['Right Shoulder'][0] - self.baseline_points['Left Shoulder'][0]\n if self.key_points['Right Shoulder'][0] != -1 and self.key_points['Left Shoulder'][0] != -1:\n v = self.key_points['Right Shoulder'][0] - self.key_points['Left 
Shoulder'][0]\n if (np.abs(v) - np.abs(b))/self.get_distance() < p1:\n return [True, (np.abs(v) - np.abs(b))/self.get_distance()]\n return [False, (np.abs(v) - np.abs(b))/self.get_distance()]\n return [False, np.abs(b)]","repo_name":"ronnachum11/PoseCheck","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"17644179340","text":"from typing import Optional\n\nfrom .boardstate import BoardState\n\nfrom itertools import product\n\nimport numpy as np\n\n\nclass PositionEvaluation:\n values = [\n [-4, -3, -2, 1, 1, 1, 0, 0],\n [-3, -2, -1, 3, 3, 2, 1, 1],\n [-2, -1, 0, 3, 5, 4, 1, 1],\n [1, 3, 3, 3, 6, 6, 6, 4],\n [1, 3, 5, 6, 8, 8, 8, 8],\n [1, 2, 4, 6, 8, 10, 11, 11],\n [0, 1, 1, 6, 8, 11, 12, 12],\n [0, 1, 1, 4, 8, 11, 12, 14]\n ]\n\n def __call__(self, board: BoardState) -> float:\n my_board = board if board.current_player == 1 else board.inverted()\n enemy_board = my_board.inverted()\n\n if board.is_game_finished:\n winner = board.winner\n if winner == board.current_player:\n return 10000\n elif winner == -board.current_player:\n return -10000\n else:\n return 0\n\n result = 0\n my_min = 100\n enemy_min = 100\n for x, y in product(range(8), range(8)):\n if my_board.board[x, y] == 1:\n my_min = min(my_min, self.values[x][y])\n result += self.values[x][y]\n if enemy_board.board[x, y] == 1:\n enemy_min = min(enemy_min, self.values[x][y])\n result -= self.values[x][y]\n\n result += my_min * 1\n result -= enemy_min * 1\n return result\n\n\nclass AI:\n def __init__(self, position_evaluation: PositionEvaluation, search_depth: int):\n self.position_evaluation: PositionEvaluation = position_evaluation\n self.depth: int = search_depth\n self.first_check = 5\n\n @staticmethod\n def init():\n return AI(PositionEvaluation(), 3)\n\n # @timeLimit(2)\n def next_move(self, board: BoardState) -> Optional[BoardState]:\n result = self._minimax(self.depth, board)\n return result[1]\n\n def _minimax(self, depth, state: BoardState):\n if state.is_game_finished:\n return self.position_evaluation(state), state\n\n if depth == 0:\n return self.position_evaluation(state), state\n\n moves = state.possible_moves\n moves.sort(key=self.position_evaluation)\n answer = None\n\n for i in range(min(len(moves), self.first_check)):\n move = moves[i]\n tmp = self._minimax(depth - 1, move)\n if answer is None or answer[0] < -tmp[0]:\n answer = tmp[0] * -1, move\n\n return answer","repo_name":"hadingus/Game_Ugolki","sub_path":"AI/ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"74757282729","text":"from django.conf import settings\nfrom rest_framework import mixins, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom thenewboston.api.accounts import fetch_balance, wire_funds\nfrom thenewboston.general.constants import TRANSACTION_FEE\nfrom thenewboston.general.permissions import IsObjectOwnerOrReadOnly\n\nfrom ..models import Wallet, Wire\nfrom ..models.wire import WireType\nfrom ..serializers.block import BlockSerializer\nfrom ..serializers.wallet import WalletReadSerializer, WalletWriteSerializer\nfrom ..serializers.wire import WireSerializer\nfrom ..serializers.withdraw import WithdrawSerializer\n\n\nclass WalletViewSet(\n mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet\n):\n permission_classes = [IsAuthenticated, IsObjectOwnerOrReadOnly]\n queryset = Wallet.objects.none()\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n wallet = serializer.save()\n read_serializer = WalletReadSerializer(wallet, context={'request': request})\n\n return Response(read_serializer.data, status=status.HTTP_201_CREATED)\n\n @action(detail=True, methods=['post'])\n def deposit(self, request, pk=None):\n wallet = self.get_object()\n minimum_balance = TRANSACTION_FEE + 1\n\n if wallet.deposit_balance < minimum_balance:\n return Response({'error': f'Minimum balance of {minimum_balance} required.'},\n status=status.HTTP_400_BAD_REQUEST)\n\n block = wire_funds(\n amount=wallet.deposit_balance - TRANSACTION_FEE,\n domain=wallet.core.domain,\n recipient_account_number_str=settings.ACCOUNT_NUMBER,\n sender_signing_key_str=wallet.deposit_signing_key,\n )\n block_serializer = BlockSerializer(data=block)\n\n if block_serializer.is_valid(raise_exception=True):\n wire = Wire.objects.create(\n **block_serializer.validated_data,\n core=wallet.core,\n owner=wallet.owner,\n wire_type=WireType.DEPOSIT,\n )\n wallet.balance += wire.amount\n wallet.save()\n else:\n return Response({'error': 'Invalid block'}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n deposit_balance = fetch_balance(account_number=wallet.deposit_account_number, domain=wallet.core.domain)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n wallet.deposit_balance = deposit_balance\n wallet.save()\n\n response_data = {\n 'wallet': WalletReadSerializer(wallet, context={\n 'request': request\n }).data,\n 'wire': WireSerializer(wire).data,\n }\n\n return Response(response_data, status=status.HTTP_201_CREATED)\n\n @action(detail=True, methods=['get'])\n def deposit_balance(self, request, pk=None):\n wallet = self.get_object()\n\n try:\n deposit_balance = fetch_balance(account_number=wallet.deposit_account_number, domain=wallet.core.domain)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n wallet.deposit_balance = deposit_balance\n wallet.save()\n read_serializer = WalletReadSerializer(wallet, context={'request': request})\n\n return Response(read_serializer.data, status=status.HTTP_200_OK)\n\n def get_queryset(self):\n user = self.request.user\n return Wallet.objects.filter(owner=user)\n\n def get_serializer_class(self):\n if self.action == 'create':\n return WalletWriteSerializer\n\n return WalletReadSerializer\n\n 
@action(detail=True, methods=['post'])\n def withdraw(self, request, pk=None):\n wallet = self.get_object()\n serializer = WithdrawSerializer(data=request.data, context={'wallet': wallet})\n serializer.is_valid(raise_exception=True)\n\n account_number = serializer.validated_data['account_number']\n amount = serializer.validated_data['amount']\n\n block = wire_funds(\n amount=amount - TRANSACTION_FEE,\n domain=wallet.core.domain,\n recipient_account_number_str=account_number,\n sender_signing_key_str=settings.SIGNING_KEY,\n )\n block_serializer = BlockSerializer(data=block)\n\n if block_serializer.is_valid(raise_exception=True):\n wire = Wire.objects.create(\n **block_serializer.validated_data,\n core=wallet.core,\n owner=wallet.owner,\n wire_type=WireType.WITHDRAW,\n )\n wallet.balance -= amount\n wallet.save()\n else:\n return Response({'error': 'Invalid block'}, status=status.HTTP_400_BAD_REQUEST)\n\n response_data = {\n 'wallet': WalletReadSerializer(wallet, context={\n 'request': request\n }).data,\n 'wire': WireSerializer(wire).data,\n }\n\n return Response(response_data, status=status.HTTP_201_CREATED)\n","repo_name":"thenewboston-developers/thenewboston-Backend","sub_path":"thenewboston/wallets/views/wallet.py","file_name":"wallet.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"14875695900","text":"import pytest\nimport os\nimport logging\nimport httpx\n\nimport random\nimport json\n\n\nURL = os.getenv(\"URL_PET_STORE_API\") \n\n\ndef test_positive_post():\n \n data = {\n \"id\": 1,\n \"petId\": 111,\n \"quantity\": 1,\n \"shipDate\": \"2023-05-15T14:05:58.400Z\",\n \"status\": \"placed\",\n \"complete\": True\n }\n response = httpx.post(f'{URL}', json=data)\n logging.info(\"create record:{data}\")\n assert response.status_code == 200, \"record not create!\"\n assert response.json()[\"id\"] == data.get(\"id\"), \"record dont have correct data\"\n\n\ndef test_negative_post():\n \n data = {\n \"id\": \"text\",\n \"petId\": 1,\n \"quantity\": 1,\n \"shipDate\": \"2023-05-15T14:05:58.400Z\",\n \"status\": \"placed\",\n \"complete\": True\n }\n response = httpx.post(f'{URL}', json=data)\n assert response.status_code == 500\n assert \"something bad happened\" in response.text\n\n\ndef test_positive_get():\n id = 2\n data = {\n \"id\": id,\n \"petId\": 111,\n \"quantity\": 1,\n \"shipDate\": \"2023-05-15T14:05:58.400Z\",\n \"status\": \"placed\",\n \"complete\": True\n }\n \n create_response = httpx.post(f'{URL}', json=data)\n logging.info(\"create some record\")\n\n response = httpx.get(f'{URL}/{id}')\n logging.info(\"get exist record\")\n assert response.status_code == 200\n assert response.json()[\"id\"] == id\n\n\ndef test_negative_get():\n id = 8\n response = httpx.get(f'{URL}/{id}')\n assert response.status_code == 404\n assert \"Order not found\" in response.text\n\n\ndef test_positive_delete():\n id = 2\n data = {\n \"id\": id,\n \"petId\": 111,\n \"quantity\": 1,\n \"shipDate\": \"2023-05-15T14:05:58.400Z\",\n \"status\": \"placed\",\n \"complete\": True\n }\n create_response = httpx.post(f'{URL}', json=data)\n assert create_response.status_code == 200\n logging.info(\"create some record\")\n\n id = create_response.json()['id']\n delete_url = f'{URL}/{id}'\n delete_response = httpx.delete(delete_url)\n logging.info(\"delete exist record\")\n assert delete_response.status_code == 200\n assert delete_response.json()[\"message\"] == str(id)\n\n get_response = httpx.get(delete_url)\n logging.info(\"check record not exist\")\n assert get_response.status_code == 404, f\"record:{id}, shold be not exist\"\n\ndef test_negative_delete():\n \n id = \"2\"\n delete_url = f'{URL}/{id}'\n delete_response = httpx.delete(delete_url)\n \n assert delete_response.status_code == 404, f\"record:{id}, shold be not exist\"","repo_name":"Polenichko/template-testing-store-","sub_path":"qa/tests/test_pet_store_api.py","file_name":"test_pet_store_api.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25710617822","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport typing\n\nfrom multiprocessing import Pool\nfrom sklearn.neighbors import KDTree\n\n\ndef hausd_interval(\n data: np.ndarray,\n confidenceLevel: float = 0.95,\n subsampleSize: int = -1,\n subsampleNumber: int = 1000,\n pairwiseDist: bool = False,\n leafSize: int = 2,\n ncores: int = 2,\n) -> float:\n \"\"\"\n **Computation of Hausdorff distance based confidence values.**\n\n Measures the confidence between two persistent features, wether they are drawn from\n a distribution fitting the underlying manifold of the data. This function is based on\n the Hausdorff distance between the points.\n\n + param **data**: a data set, type `np.ndarray`.\n + param **confidenceLevel**: confidence level, default `0.95`, type `float`.\n + param **subsampleSize**: size of each subsample, type `int`.\n + param **subsampleNumber**: number of subsamples, type `int`.\n + param **pairwiseDist**: if `true`, a symmetric `nxn`-matrix is generated out of the data, type `bool`.\n + param **leafSize**: leaf size for KDTree, type `int`.\n + param **ncores**: number of cores for parallel computing, type `int`.\n + return **confidence**: the confidence to be a persistent homology class, type `float`.\n \"\"\"\n dataSize = np.size(data, 0)\n\n if subsampleSize == -1:\n subsampleSize = int(dataSize / np.log(dataSize))\n global hausdorff_distance\n\n if pairwiseDist == False:\n\n def hausdorff_distance(subsampleSize: list) -> float:\n \"\"\"\n **Distances between the points of data and a random subsample of data of size `m`.**\n\n + param **subsampleSize**: the size of the data, type `int`.\n + return **hausdorffDistance**: Hausdorff distance, type `float`.\n \"\"\"\n I = np.random.choice(dataSize, subsampleSize)\n Icomp = [item for item in np.arange(dataSize) if item not in I]\n tree = KDTree(data[I,], leaf_size=leafSize)\n distance, ind = tree.query(data[Icomp,], k=1)\n hausdorffDistance = max(distance)\n return hausdorffDistance\n\n with Pool(ncores) as cores:\n distanceVector = cores.map(\n hausdorff_distance, [subsampleSize] * subsampleNumber\n )\n cores.close()\n\n else:\n\n def hausdorff_distance(subsampleSize: list) -> float:\n \"\"\"\n **Distances between the points of data and a random subsample of data of size `m`.**\n\n + param **subsampleSize**: the size of the data, type `int`.\n + return **hausdorffDistance**: Hausdorff distance, type `float`.\n \"\"\"\n I = np.random.choice(dataSize, subsampleSize)\n hausdorffDistance = np.max(\n [np.min(data[I, j]) for j in np.arange(dataSize) if j not in I]\n )\n return hausdorffDistance\n\n with Pool(ncores) as cores:\n distanceVector = cores.map(\n hausdorff_distance, [subsampleSize] * subsampleNumber\n )\n cores.close()\n distanceVector = [i[0] for i in distanceVector]\n\n # Quantile and confidence band.\n myquantile = np.quantile(distanceVector, confidenceLevel)\n confidence = 2 * myquantile\n\n return confidence\n\n\ndef truncated_simplex_tree(simplexTree: np.ndarray, int_trunc: int = 100) -> tuple:\n \"\"\"\n **This function return a truncated simplex tree.**\n\n A sparse representation of the persistence diagram in the form of a truncated\n persistence tree. 
Speeds up computation on large scale data sets.\n\n + param **simplexTree**: simplex tree, type `np.ndarray`.\n + param **int_trunc**: number of persistent interval kept per dimension, default is `100`, type `int`.\n + return **simplexTreeTruncatedPersistence**: truncated simplex tree, type `np.ndarray`.\n \"\"\"\n simplexTree.persistence()\n dimension = simplexTree.dimension()\n simplexTreeTruncatedPersistence = []\n\n for i in range(dimension):\n dPersistence = simplexTree.persistence_intervals_in_dimension(dimension)\n j = len(dPersistence)\n\n if j > int_trunc:\n dPersistenceTruncated = [dPersistence[i] for i in range(j - int_trunc, j)]\n else:\n dPersistenceTruncated = dPersistence\n simplexTreeTruncatedPersistence = simplexTreeTruncatedPersistence + [\n (i, (l[0], l[1])) for l in dPersistenceTruncated\n ]\n\n return simplexTreeTruncatedPersistence\n","repo_name":"karhunenloeve/NTOPL","sub_path":"persistenceStatistics.py","file_name":"persistenceStatistics.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"36882424495","text":"from question_model import Question\nfrom data import question_data, logo\nfrom quiz_brain import QuizBrain\nquestion_bank = []\n\nfor index in range(0, len(question_data)):\n new_q_answer = question_data[index][\"correct_answer\"]\n new_q_text = question_data[index][\"question\"]\n new_q = Question(new_q_text, new_q_answer)\n question_bank.append(new_q)\n \ncategory = question_data[0][\"category\"]\ndifficulty = question_data[0][\"difficulty\"].capitalize()\n\nprint(f\"{logo}\")\n\nprint(f\"Welcome to quiz.\\nCategory: {category}\\nDifficulty: {difficulty}\")\n\nquiz = QuizBrain(question_bank)\nwhile quiz.still_has_questions():\n quiz.next_question()\n\nprint(\"You have competed the quiz\")\nprint(f\"Your final score was: {quiz.user_score}/{quiz.question_number}\")\n","repo_name":"pawlowski-hub/Quiz-game-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30882667375","text":"# min heap priority queue helper functions: bubble_up and bubble_down\ndef bubble_up(queue, index):\n if index == 0: return \n else:\n parent = (index + 1) // 2 - 1\n if queue[index].distance < queue[parent].distance:\n queue[index], queue[parent] = queue[parent], queue[index]\n bubble_up(queue, parent)\n return\n\ndef bubble_down(queue, index):\n size = len(queue)\n left = index * 2 + 1\n right = index * 2 + 2\n if left >= size: return\n elif right >= size:\n if queue[index].distance > queue[left].distance:\n queue[index], queue[left] = queue[left], queue[index]\n else:\n if queue[left].distance < queue[right].distance:\n smaller = left\n else: smaller = right\n if queue[index].distance > queue[smaller].distance:\n queue[index], queue[smaller] = queue[smaller], queue[index]\n bubble_down(queue, smaller)\n\n# min heap used for priority queue of available parking spots\nclass PriorityQueue:\n def __init__(self, spot_index):\n self.q = []\n for spot in spot_index:\n self.q.append(spot)\n\n def insert(self, spot):\n self.q.append(spot)\n bubble_up(self.q, len(self.q) - 1)\n\n def pop_min(self):\n last = len(self.q) - 1\n self.q[0], self.q[last] = self.q[last], self.q[0]\n spot = self.q.pop()\n bubble_down(self.q, 0)\n return spot","repo_name":"ddkingsley/carpark","sub_path":"app/heapqueue.py","file_name":"heapqueue.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"419915762","text":"from big_ol_pile_of_manim_imports import*\n\nclass Introduction(Scene):\n\tdef construct(self):\n\t\tt1 = TexMobject(r\"Visualzing\\quad Linear\\quad Transformations\")\n\t\tt2 = TexMobject(r\"Using\\quad python\\quad \\& \\quad manim\")\n\t\tt3 = TexMobject(r\"Sameer\\quad Prasad\\quad Subhedar\")\n\t\tt4 = TexMobject(\"PES2201800323\")\n\t\tself.play(Write(t1))\n\t\tself.play(Transform(t1,t2))\n\t\tself.wait()\n\t\tself.play(Transform(t1,t3))\n\t\tself.wait()\n\t\tself.play(Transform(t1,t4))\n\t\tself.wait()\n\nclass AntiClockWiseRotation60Explanation(Scene):\n\tdef construct(self):\n\t\tt1 = TextMobject(\"This is a 60 degree\",\" Anticlockwise Rotation\")\n\t\tt1[1].set_color(YELLOW)\n\t\tself.play(Write(t1))\n\t\tself.wait()\n\nclass ClockWiseRotation30Explanation(Scene):\n\tdef construct(self):\n\t\tt1 = TextMobject(\"This is a 30 degree\",\" Clockwise Rotation\")\n\t\tt1[1].set_color(YELLOW)\n\t\tself.play(Write(t1))\n\t\tself.wait()\n\nclass AntiClockWiseRotation60(LinearTransformationScene):\n\tCONFIG = {\n\t\t\"leave_ghost_vectors\" : True,\n\t\t\"angle\" : np.pi/3,\n\t}\n\tdef construct(self):\n\t\tmatrix = [[np.cos(self.angle),-1*np.sin(self.angle)],[np.sin(self.angle),np.cos(self.angle)]]\n\t\tobject = Dot(color = DARK_BLUE)\n\t\tself.add(object)\n\t\tv = np.array([[2],[1]])\n\t\tself.add_vector(v)\n\t\tself.apply_matrix(matrix)\n\t\tself.wait()\n\n\nclass ClockWiseRotation30(LinearTransformationScene):\n\tCONFIG = {\n\t\t\"leave_ghost_vectors\" : True,\n\t\t\"angle\" : np.pi/6,\n\t}\n\tdef construct(self):\n\t\tmatrix = [[np.cos(self.angle),np.sin(self.angle)],[-1*np.sin(self.angle),np.cos(self.angle)]]\n\t\tobject = Dot(color = DARK_BLUE)\n\t\tself.add(object)\n\t\tv = np.array([[1],[1]])\n\t\tself.add_vector(v)\n\t\tself.apply_matrix(matrix)\n\t\tself.wait()","repo_name":"samsub18/Visualizing-Linear-Transformations","sub_path":"Transformation Scenes/6_Linear_Transformation_Rotation.py","file_name":"6_Linear_Transformation_Rotation.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"1142366308","text":"from pynput import keyboard\nfrom pynput import mouse\nfrom controller import starter, scroll_events, key_events\n\n\ndef on_press(key):\n starter.execute(key)\n if starter.start_status:\n for item in key_events:\n item.execute(key)\n\n\ndef on_release(key):\n pass\n\n\ndef on_scroll(x, y, dx, dy):\n if starter.start_status:\n for item in scroll_events:\n item.execute(dy)\n\n\ndef main():\n with keyboard.Listener(\n on_press=on_press,\n on_release=on_release) as listener:\n with mouse.Listener(\n on_scroll=on_scroll) as mouse_listener:\n starter.add(listener, mouse_listener)\n listener.join()\n mouse_listener.join()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"s724959099/QuickMarcro","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20088474735","text":"\r\nimport random\r\nimport time\r\n\r\nfrom DirectionAlgo import *\r\n\r\nfrom movers import Gargou,Mummy,GuardianMummy\r\n\r\nclass CatacombeScene():\r\n\r\n def __init__(self, view, life, clock, input, score ):\r\n self.Life = life\r\n self.debug = True\r\n self.level = 1\r\n self.clock = clock\r\n self.input = input\r\n self.plate = []\r\n self.score = score\r\n self.view = view\r\n\r\n def Initialize(self):\r\n '''\r\n scene construction\r\n '''\r\n self.Mummy = []\r\n self.guardianMummy = []\r\n self.Gargou = Gargou( self )\r\n self.Gargou.level = 1\r\n self.view.Mummy = self.Mummy\r\n self.view.Guardian = self.guardianMummy\r\n self.view.Gargou = self.Gargou\r\n\r\n # Level-ordered algorithms\r\n self.directionAlgo = [\r\n BlindDirection(),\r\n SillyDirection(),\r\n TargetRandomPathDirection(),\r\n SceneAwareDirection(),\r\n SceneAwareFacingDirection(),\r\n TargetDirection()\r\n ];\r\n\r\n def Start(self ):\r\n '''\r\n scene start\r\n '''\r\n\r\n self.plate = [39 * [0] for i in range(26)]\r\n self.trace = [39 * [0] for i in range(26)]\r\n\r\n self.box = [5 * ['NonTested'] for i in range(4)]\r\n l = str(self.level)\r\n\r\n self.boxchoice = ['Treasure', 'Treasure', 'Treasure', 'Treasure', 'Treasure', 'Treasure', 'Treasure', 'Treasure', 'Treasure', 'Treasure',\r\n 'GuardianMummy'+l, 'Key', 'RoyalMummy', 'Scroll', 'Empty'+l, 'Empty'+l, 'Empty'+l, 'Empty'+l, 'Empty'+l, 'Empty'+l]\r\n random.shuffle(self.boxchoice)\r\n\r\n self.Key = False\r\n self.Scroll = False\r\n self.RoyalMummy = False\r\n\r\n for i in range(3, 24, 5):\r\n for j in range(1, 38):\r\n self.plate[i][j] = 1\r\n self.plate[i+1][j] = 1\r\n for j in range(1, 37, 7):\r\n for i in range(3, 25):\r\n self.plate[i][j] = 1\r\n self.plate[i][j+1] = 1\r\n\r\n self.plate[1][15] = 1\r\n self.plate[1][16] = 1\r\n self.plate[2][15] = 1\r\n self.plate[2][16] = 1\r\n\r\n # birth of a new mummy\r\n self.Mummy.append(Mummy(1, 23, self, self.directionAlgo[self.Gargou.level-1] ))\r\n # mummy position init\r\n i = 1\r\n for mum in self.Mummy:\r\n mum.Restart(1 + 2 * i, 23)\r\n i += 1\r\n\r\n def prepareGraphics(self):\r\n '''\r\n graphics initialisation\r\n '''\r\n self.view.init( self.level, self.Life, self.score, self.plate, self.debug, self.boxchoice)\r\n\r\n def tick(self):\r\n '''\r\n game tick\r\n '''\r\n\r\n if self.input.nextLevel :\r\n self.terminate( 'UpLevel' )\r\n\r\n self.Gargou.move(self.input.direction)\r\n\r\n if self.Gargou.movement.obstacle == False :\r\n # Shall we open a bloc ?\r\n if self.Gargou.movement.y>2:\r\n xx1 = min(4, max(0, (self.Gargou.movement.x - 3) // 7))\r\n yy1 = min(3, max(0, (self.Gargou.movement.y - 5) // 5))\r\n xx2 = min(4, (self.Gargou.movement.x - 1 )// 7)\r\n yy2 = min(3, (self.Gargou.movement.y - 3) // 5)\r\n if self.box[yy1][xx1] == 'NonTested':\r\n self.UpdateClose(xx1, yy1)\r\n if self.box[yy1][xx2] == 'NonTested':\r\n self.UpdateClose(xx2, yy1)\r\n if self.box[yy2][xx1] == 'NonTested':\r\n self.UpdateClose(xx1, yy2)\r\n if self.box[yy2][xx2] == 'NonTested':\r\n self.UpdateClose(xx2, yy2)\r\n\r\n # Guardian mummies\r\n for guardian in self.guardianMummy:\r\n guardian.tick()\r\n if guardian.movable == 0:\r\n if guardian.iteration == 16:\r\n self.Mummy.append(Mummy(guardian.x, guardian.y, self, self.directionAlgo[self.Gargou.level-1] ))\r\n self.plate[guardian.y][guardian.x] = 1\r\n self.plate[guardian.y+1][guardian.x] = 1\r\n self.plate[guardian.y][guardian.x+1] = 1\r\n self.plate[guardian.y+1][guardian.x+1] = 1\r\n self.view.eraseTrace( guardian.x, guardian.y 
)\r\n self.guardianMummy.remove(guardian)\r\n self.view.remove(guardian)\r\n\r\n # Mummies moves\r\n for mum in self.Mummy:\r\n mum.tick()\r\n\r\n # collision ?\r\n if abs(mum.x - self.Gargou.movement.x) <= 1 and abs(mum.y - self.Gargou.movement.y) <= 1:\r\n # The player don't have the scroll ?\r\n if not self.Scroll:\r\n self.Life.life -= 1\r\n self.Life.render()\r\n if self.Life.life == 0:\r\n self.terminate( 'GameOver' )\r\n else:\r\n self.Scroll = False\r\n self.score.render()\r\n self.Mummy.remove(mum)\r\n self.view.remove(mum)\r\n\r\n def preDispatch(self):\r\n '''\r\n before we quit the scene\r\n '''\r\n\r\n if self.stopEventName == 'UpLevel' :\r\n self.level = max(1, (self.level + 1) % 6)\r\n # Tout les 6 niveau, self.Level revient à 1\r\n if self.level == 1:\r\n self.Gargou.level += 1\r\n for mum in self.Mummy:\r\n self.Mummy.remove(mum)\r\n self.view.remove(mum)\r\n MUMMY = []\r\n\r\n def Stop(self):\r\n '''\r\n quit the scene\r\n '''\r\n self.preDispatch()\r\n for guardian in self.guardianMummy:\r\n self.guardianMummy.remove(guardian)\r\n self.view.remove(guardian)\r\n\r\n def draw(self):\r\n '''\r\n render the scene\r\n '''\r\n self.view.render()\r\n\r\n def getAvailableDirections(self, x, y ):\r\n '''\r\n return the possible direction accordly to the passed coordinates\r\n '''\r\n res = []\r\n if self.plate[y][x+2] == 1 and self.plate[y+1][x+2] == 1: res.append(0)\r\n if self.plate[y-1][x] == 1 and self.plate[y-1][x+1] == 1: res.append(1)\r\n if self.plate[y][x-1] == 1 and self.plate[y+1][x-1] == 1: res.append(2)\r\n if self.plate[y+2][x] == 1 and self.plate[y+2][x+1] == 1: res.append(3)\r\n return res\r\n\r\n def testMovement( self, oldX, oldY, direction ):\r\n '''\r\n return if the movement is possible and the new position coordinates\r\n '''\r\n x = oldX\r\n y = oldY\r\n\r\n if direction == 0: x = oldX + 1\r\n elif direction == 1: y = oldY - 1\r\n elif direction == 2: x = oldX - 1\r\n elif direction == 3: y = oldY + 1\r\n\r\n if self.plate[y][x] == self.plate[y][x+1] == self.plate[y+1][x] == self.plate[y+1][x+1] == 1:\r\n return ( True, x,y )\r\n else:\r\n return ( False, oldX, oldY )\r\n\r\n def UpdateClose(self, x, y ):\r\n '''\r\n return true if the bloc must be opened\r\n '''\r\n for i in range(3 + 5 * y, 10 + 5 * y):\r\n for j in range(1 + 7 * x, 10 + 7 * x):\r\n if self.plate[i][j] == 1 and self.trace[i][j] == 0: return False\r\n self.box[y][x] = self.boxchoice[5 * y + x]\r\n self.view.openBloc( self.box, x, y )\r\n if self.box[y][x] == 'Treasure': self.score.score += 5\r\n elif self.box[y][x] == \"RoyalMummy\":\r\n self.score.score +=50\r\n self.RoyalMummy = True\r\n elif self.box[y][x] == \"Scroll\":\r\n self.Scroll = True\r\n self.score.render()\r\n elif self.box[y][x] == \"Key\":\r\n self.Key = True\r\n elif self.box[y][x] == \"GuardianMummy\"+str(self.level):\r\n self.guardianMummy.append(GuardianMummy(self, x, y ))\r\n self.score.render()\r\n\r\n def terminate(self, code ):\r\n '''\r\n the scene will terminate with the `code` cause\r\n code can be : \"UpLevel\" or \"GameOver\"\r\n '''\r\n self.stopEventName = code\r\n self.stopEvent = True\r\n","repo_name":"AlexandreBrach/OhMummy","sub_path":"CatacombeScene.py","file_name":"CatacombeScene.py","file_ext":"py","file_size_in_byte":8056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"5049515991","text":"from pathlib import Path\nimport os, json, sys, glob\nfrom time import sleep\nfrom threading import Thread, Lock\nfrom PIL import Image\n\nimport pystray\nfrom win10toast import ToastNotifier\n\nJOURNAL_DIR = os.path.join(Path.home(), 'Saved Games', 'Frontier Developments', 'Elite Dangerous')\nCONFIG_DIR = os.path.join(Path.home(), 'AppData', 'local', 'EDWatcher')\nCONFIG_FILE = 'edwatcher.conf'\nCONFIG_PATH = os.path.join(CONFIG_DIR, CONFIG_FILE)\n\n\ndef resource_path(relative):\n try:\n base_path = sys._MEIPASS\n except:\n base_path = os.path.abspath('.')\n return os.path.join(base_path, relative)\n\n\n\nICON_PATH = resource_path('icon.ico')\nprint(ICON_PATH)\n\nclass DirectoryWatcher:\n\n terminate = False\n\n def __init__(self, dir, set_hook):\n self.dir = dir\n self.terminate = False\n self.set_hook = set_hook\n\n def loop(self):\n while not self.terminate:\n # get last modified file\n current_latest_file = max(glob.glob(os.path.join(self.dir, '*')), key=os.path.getctime)\n self.set_hook(current_latest_file)\n sleep(1)\n print('directory watcher exits')\n\n\nclass FileWatcher:\n\n terminate = False\n\n def __init__(self, path, submit_hook, last_submitted_hook):\n self.path = path\n self.submit_hook = submit_hook\n self.terminate = False\n self.last_submitted_hook = last_submitted_hook\n\n def loop(self):\n while not self.terminate:\n submitted = True\n with open(self.path, 'r') as f:\n for line in f.readlines():\n if not submitted:\n self.submit_hook(line)\n last_submitted, lock = self.last_submitted_hook()\n if line == last_submitted:\n submitted = False\n lock.release()\n sleep(1)\n print('file watcher exits')\n\n\nclass SubmitWatcher:\n\n terminate = False\n\n def __init__(self, set_last_entry_hook, notifier, notify):\n self.submit_entries = []\n self.terminate = False\n self.set_last_entry = set_last_entry_hook\n self.notifier = notifier\n self.notify = notify\n\n def submit(self, entries):\n self.submit_entries = list(entries)\n return self.submit_entries\n\n def loop(self):\n while not self.terminate:\n if len(self.submit_entries) > 0:\n last_entry = None\n for entry in self.submit_entries:\n print('submit entry: %s' % entry)\n last_entry = entry\n if self.notify: self.notifier.show_toast(\n \"EDWatch\",\n \"Submitted %d events catched from ED\" % len(self.submit_entries),\n icon_path=ICON_PATH,\n duration=5\n )\n self.submit_entries = []\n self.set_last_entry(last_entry)\n sleep(5)\n print('submit watcher exits')\n\n\nclass EDWatcher:\n '''\n EDWatcher provides a interface to ED pilots journal and submits data to ED pilots database webapi.\n '''\n\n def __init__(self):\n print('starting ED watcher...')\n\n\n self.terminate = False\n\n # test if config path and file exists\n Path(CONFIG_DIR).mkdir(exist_ok=True, parents=True)\n if not Path(CONFIG_PATH).exists():\n with open(CONFIG_PATH, 'w') as f:\n f.write(json.dumps({\n 'last_submitted': '',\n 'notifications': True,\n }))\n self.conf = None\n try:\n with open(CONFIG_PATH, 'r') as f:\n self.conf = json.loads(f.read())\n except:\n print('ERROR: Can not parse config file.')\n quit(1)\n\n print('last submitted entry was %s' % self.conf['last_submitted'])\n self.watch_file = None\n self.entries_to_submit = []\n self.submit_entry_lock = Lock()\n self.last_submitted_lock = Lock()\n self.file_watcher = None\n self.notifier = ToastNotifier()\n self.submit_watcher = SubmitWatcher(self.update_last_submitted, self.notifier, self.conf['notifications'])\n t = Thread(target=self.submit_watcher.loop)\n 
self.threads = [t]\n t.start()\n\n icon_image = Image.open(ICON_PATH)\n exit_item = pystray.MenuItem(enabled=True, text='Exit', action=self.exit)\n notification_item = pystray.MenuItem(enabled=True, text='Notifications', action=self.toggle_notifications,\n checked=lambda item: self.conf['notifications'])\n tray_menu = pystray.Menu(exit_item, notification_item)\n self.icon = pystray.Icon(name='EDWatcher', icon=icon_image, title=\"EDWatcher\", menu=tray_menu)\n\n\n\n def toggle_notifications(self, *args, **kwargs):\n self.conf['notifications'] = not self.conf['notifications']\n if self.conf['notifications']:\n state = 'on'\n else:\n state = 'off'\n print('setting notifications to %s' % state)\n\n def add_submit_entry(self, entry):\n self.submit_entry_lock.acquire()\n if entry not in self.entries_to_submit:\n print('adding new submit entry: %s' % entry)\n self.entries_to_submit.append(entry)\n self.submit_entry_lock.release()\n\n def set_watch_file(self, path):\n if self.watch_file != path:\n print('new file watching is: %s' % path)\n self.watch_file = path\n if self.file_watcher:\n print('stopping old file watcher')\n self.file_watcher.terminate()\n print('starting new file watcher')\n self.file_watcher = FileWatcher(path, self.add_submit_entry, self.get_last_submitted)\n t = Thread(target=self.file_watcher.loop)\n self.threads.append(t)\n t.start()\n\n def get_last_submitted(self):\n self.last_submitted_lock.acquire()\n return self.conf['last_submitted'], self.last_submitted_lock\n\n def update_last_submitted(self, obj):\n self.conf['last_submitted'] = obj\n with open(CONFIG_PATH, 'w') as f:\n f.write(json.dumps(self.conf))\n\n def loop(self):\n while not self.terminate:\n self.submit_entry_lock.acquire()\n queued = self.submit_watcher.submit(self.entries_to_submit)\n self.entries_to_submit = [n for n in self.entries_to_submit if n not in queued]\n self.submit_entry_lock.release()\n sleep(10)\n\n def exit(self, *args, **kwargs):\n print('shut down EDWatcher')\n self.terminate = True\n self.directory_watcher.terminate = True\n self.file_watcher.terminate = True\n self.submit_watcher.terminate = True\n print('threads terminated')\n print('joining threads')\n exit_threads = []\n for t in self.threads:\n et = Thread(target=t.join)\n et.start()\n exit_threads.append(et)\n for et in exit_threads:\n if et : et.join()\n print('saving config')\n with open(CONFIG_PATH, 'w') as f:\n f.write(json.dumps(self.conf))\n self.icon.stop()\n\n\n def run(self):\n\n # parse all files for not submitted entries\n files = glob.glob(os.path.join(JOURNAL_DIR, '*'))\n submitted = True\n if self.conf['last_submitted'] == '':\n submitted = False\n for file in files:\n with open(file, 'r') as f:\n for line in f.readlines():\n if not submitted:\n self.add_submit_entry(line)\n if line == self.conf['last_submitted']:\n submitted = False\n\n self.directory_watcher = DirectoryWatcher(JOURNAL_DIR, self.set_watch_file)\n directory_watcher_thread = Thread(target=self.directory_watcher.loop)\n directory_watcher_thread.start()\n self.threads.append(directory_watcher_thread)\n\n # running loop in a thread to start tray icon from main thread so this possibly runs also on mac os\n Thread(target=self.loop).start()\n\n def setup_icon(icon):\n icon.visible = True\n\n # icon.run blocks itself\n self.icon.run(setup_icon)\n print('goodbye')\n sys.exit(0)\n\nif __name__ == '__main__':\n app = EDWatcher()\n 
app.run()\n","repo_name":"superdachs/EDWatcher","sub_path":"edsm_log_bridge_daemon.py","file_name":"edsm_log_bridge_daemon.py","file_ext":"py","file_size_in_byte":8406,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"7900197720","text":"import click\nfrom click.testing import CliRunner\n\nfrom tests.context import errors, songtext\n\n\nrunner = CliRunner()\n\n\nclass TestLyricWiki:\n\n def test_get_single_result_with_artist_and_song_title(self):\n result = runner.invoke(songtext.cli, [\n '-a', 'Paramore',\n '-t', 'Where the Lines Overlap',\n '--no-pager'\n ])\n assert result.exit_code == 0\n assert 'Paramore: Where The Lines Overlap' in result.output\n\n def test_error_with_artist_only(self):\n result = runner.invoke(songtext.cli, ['-a', 'Die Antwoord'])\n assert isinstance(result.exception, errors.ArgumentError)\n\n def test_error_with_song_title_only(self):\n result = runner.invoke(songtext.cli, ['-t', 'get lucky'])\n assert isinstance(result.exception, errors.ArgumentError)\n\n def test_searching_with_positional_arguments_fails(self):\n result = runner.invoke(songtext.cli, ['joy division atmosphere'])\n assert isinstance(result.exception, errors.ArgumentError)\n\n def test_searching_with_words_option_fails(self):\n result = runner.invoke(songtext.cli, ['-w', 'you want a bugatti'])\n assert isinstance(result.exception, errors.ArgumentError)\n","repo_name":"ysim/songtext","sub_path":"tests/test_api_integrations.py","file_name":"test_api_integrations.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"}
+{"seq_id":"35850438280","text":"# encoding=utf-8\n\nfrom multiprocessing import Process\nfrom time import time, sleep\nimport os\n\n\ndef f(n):\n sleep(1)\n print(n * n, 'ProcessId:%d' % (os.getpid()))\n\n\ndef main():\n print('ParentProcessId:%d' % os.getpid())\n ps = []\n for i in range(8):\n p = Process(target=f, args=(i,))\n p.start()\n ps.append(p)\n # p.join() # single process\n # p.terminate() # kill process\n for p in ps:\n p.join()\n\n\nif __name__ == '__main__':\n start_time = time()\n main()\n end_time = time()\n print('Consuming time seconds:%d' % (end_time - start_time))\n","repo_name":"feizhihui/LeetCode","sub_path":"snippet_code/thread/multi_print.py","file_name":"multi_print.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33895970467","text":"import sys, pickle\nimport psycopg2\nfrom psycopg2.extras import execute_values\nfrom parser_types import *\nfrom secrets import ANDIN_MIGRATE_PASS\nfrom typing import List, Tuple, Dict\n\ndef pickle_load(file: str):\n with open(file, 'rb') as f:\n data: ParsedData = pickle.load(f)\n return data\n\ndef insert_import(cur, name):\n cur.execute(\"\"\"\n INSERT INTO import (date, script)\n VALUES\n (NOW(), %s)\n RETURNING id;\n \"\"\", (name, ))\n import_id = cur.fetchone()[0]\n print(f'Inserted import {import_id}')\n return import_id\n\ndef insert_osm_metas(cur, osm_meta: List[OsmMeta]):\n vals = [(m.osm_id, m.osm_type, m.osm_version) for m in osm_meta]\n execute_values(cur, \"\"\"\n INSERT INTO osm_element (osm_id, osm_type, osm_version)\n VALUES\n %s\n RETURNING id;\n \"\"\", vals)\n osm_element_ids = cur.fetchall()\n print(f'Inserted {len(osm_element_ids)} osm_element rows')\n return osm_element_ids\n\ndef insert_data_sources(cur, osm_element_ids: List[int], import_id: int):\n vals = [(osm_element_id, import_id) for osm_element_id in osm_element_ids]\n execute_values(cur, \"\"\"\n INSERT INTO data_source (osm, import)\n VALUES\n %s\n RETURNING id;\n \"\"\", vals)\n data_source_ids = cur.fetchall()\n print(f'Inserted {len(data_source_ids)} data_source rows')\n return data_source_ids\n\ndef insert_buildings_db(cur, buildings_with_fks: List[Tuple[Building, Tuple[int, int]]]):\n buildings_with_fks = list(buildings_with_fks)\n vals = [(b.geometry.wkb, d, a) for b, (d, a) in buildings_with_fks]\n execute_values(cur, \"\"\"\n INSERT INTO building (geometry, data_source, address)\n VALUES\n %s\n RETURNING id;\n \"\"\", vals, template=\"(ST_GeomFromWKB(%s), %s, %s)\")\n building_ids = cur.fetchall()\n print(f'Inserted {len(building_ids)} building rows')\n return building_ids\n\ndef insert_building_addresses(cur, buildings: List[Building]):\n with_address = [b for b in buildings if b.address is not None]\n vals = [(b.address.free, b.address.locality, b.address.region, b.address.postcode, b.address.country) for b in with_address]\n execute_values(cur, \"\"\"\n INSERT INTO address (free, locality, region, postcode, country)\n VALUES\n %s\n RETURNING id;\n \"\"\", vals)\n with_address_ids = dict(zip(with_address, cur.fetchall()))\n address_ids = [with_address_ids.get(b) for b in buildings]\n print(f'Inserted {len(address_ids)} building rows')\n return address_ids\n\ndef insert_buildings(cur, buildings: List[Building], import_id: int):\n metas = [b.osm_meta for b in buildings]\n osm_element_ids = insert_osm_metas(cur, metas)\n data_source_ids = insert_data_sources(cur, osm_element_ids, import_id)\n address_ids = insert_building_addresses(cur, buildings)\n zipped = zip(buildings, zip(data_source_ids, address_ids))\n building_ids = insert_buildings_db(cur, zipped)\n building_ids_dict = dict(zip(buildings, building_ids))\n return building_ids_dict\n\ndef insert_data_source_rooms(cur, room_data_source_list: List[Tuple[Room, int]], building_ids: Dict[Building, int]):\n vals = [(r.geometry.wkb, r.level, r.name, r.ref, building_ids[r.building], d) for r, d in room_data_source_list]\n execute_values(cur, \"\"\"\n INSERT INTO room (geometry, level, name, ref, building, data_source)\n VALUES\n %s\n RETURNING id;\n \"\"\", vals, template=\"(ST_GeomFromWKB(%s), %s, %s, %s, %s, %s)\")\n room_ids = cur.fetchall()\n print(f'Inserted {len(room_ids)} room rows')\n return room_ids\n\ndef insert_rooms(cur, rooms: List[Room], building_ids: Dict[Building, int], import_id: int):\n metas = 
[b.osm_meta for b in rooms]\n osm_element_ids = insert_osm_metas(cur, metas)\n data_source_ids = insert_data_sources(cur, osm_element_ids, import_id)\n room_ids = dict(zip(rooms, insert_data_source_rooms(cur, zip(rooms, data_source_ids), building_ids)))\n return room_ids\n\n\nif __name__ == \"__main__\":\n data = pickle_load(sys.argv[1])\n conn = psycopg2.connect(host=\"localhost\", database=\"andin_dev\", user=\"andin_migrate\", password=ANDIN_MIGRATE_PASS)\n cur = conn.cursor()\n import_id = insert_import(cur, 'osm')\n building_ids = insert_buildings(cur, data.buildings, import_id)\n room_ids = insert_rooms(cur, data.rooms, building_ids, import_id)\n conn.commit()\n cur.close()\n conn.close()","repo_name":"ubipo/andin-db","sub_path":"pickle2db.py","file_name":"pickle2db.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"43056805707","text":"# %% Imports\n\nimport pyodbc\nimport pandas as pd\nimport os\nfrom collections import defaultdict\nimport hashlib\n\n# %% Function to retrieve list of databases\n\n\ndef get_db_files(top_dir: str, file_ext):\n \"\"\"\n Return dataframe of information for files with specified extensions found\n under starting directory.\n\n Parameters\n ----------\n top_dir : str\n directory to begin search\n file_ext : list, tuple, or str\n file extensions to include in search\n\n Returns\n -------\n dataframe\n pandas dataframe with id hash index, file name, directory string, and full absolute\n path\n \"\"\"\n\n df_files = pd.DataFrame(\n columns=[\"db_identifier\", \"file_name\", \"file_dir\", \"file_path\"]\n )\n\n for root, _, files in os.walk(top_dir):\n for file in files:\n if file.endswith(file_ext):\n new_row = pd.Series(\n {\n \"file_name\": file,\n \"file_dir\": root,\n \"file_path\": os.path.join(root, file),\n \"db_identifier\": hashlib.md5(\n os.path.join(root, file).encode(\n encoding=\"UTF-8\", errors=\"strict\"\n )\n ).hexdigest(),\n }\n )\n df_files = pd.concat(\n [df_files, new_row.to_frame().T], ignore_index=True\n )\n\n return df_files\n\n\n# %% Function to open ODBC database and return connection and cursor\n\n\ndef odbc_connect_ms_access(dbq_path: str):\n \"\"\"\n Returns pyodbc connection and cursor for a Microsoft Access database.\n\n Parameters\n ----------\n dbq_path : str\n full absolute file path of MS Access database\n\n Returns\n -------\n obj, obj\n odbc connection, odbc connection cursor\n \"\"\"\n\n # Required Microsoft Access ODBC driver\n odbc_driver = \"{Microsoft Access Driver (*.mdb, *.accdb)}\"\n\n # ODBC connection string\n conn_str = rf\"DRIVER={odbc_driver};\" rf\"DBQ={dbq_path};\"\n\n # Open ODBC connection and cursor\n conn = pyodbc.connect(conn_str)\n cur = conn.cursor()\n\n # Workaround for MS Access ODBC \"utf-16-le\" error\n def decode_bad_utf16(raw_string):\n s = raw_string.decode(\"utf-16le\", \"ignore\")\n try:\n n = s.index(\"\\u0000\")\n s = s[:n] # null terminator\n except:\n pass\n return s\n\n conn.add_output_converter(pyodbc.SQL_WVARCHAR, decode_bad_utf16)\n\n return conn, cur\n\n\n# %% Function to extract database schema\n\n\ndef extract_ms_access_db_schema(file_path: str):\n \"\"\"\n Extracts table schema from Microsoft Access database using pyodbc.\n\n Parameters\n ----------\n file_path : str\n full absolute file path of MS Access database\n\n Returns\n -------\n dict\n dictionary of table definitions\n \"\"\"\n\n if not file_path.endswith((\".accdb\", \".mdb\")):\n return\n\n db_table_defs = defaultdict(dict)\n\n db_conn, db_cursor = odbc_connect_ms_access(file_path)\n\n db_table_names = [\n t.table_name\n for t in db_cursor.tables(tableType=\"TABLE\")\n # Exclude MS Access generated tables\n if not (t.table_name in [\"Paste Errors\", \"Switchboard Items\"])\n ]\n\n for curr_table in db_table_names:\n db_table_defs[curr_table] = {}\n\n db_table_defs[curr_table][\"unique_indices\"] = {}\n\n for s in db_cursor.statistics(table=curr_table, unique=True):\n if s.index_name:\n if s.index_name in db_table_defs[curr_table][\"unique_indices\"]:\n db_table_defs[curr_table][\"unique_indices\"][s.index_name].append(\n s.column_name\n )\n else:\n db_table_defs[curr_table][\"unique_indices\"][s.index_name] = [\n s.column_name\n ]\n\n db_table_defs[curr_table][\"column_defs\"] = {}\n\n for col in db_cursor.columns(table=curr_table):\n db_table_defs[curr_table][\"column_defs\"][col.column_name] = {\n \"data_type_name\": 
col.type_name,\n \"sql_data_type\": col.sql_data_type,\n \"is_nullable\": col.is_nullable,\n }\n\n db_conn.close()\n\n return dict(db_table_defs)\n\n\n# %% Function to return pandas df of table columns definitions\n\n\ndef extract_db_table_def_df(id: str, db: dict):\n \"\"\"\n Create pandas data frame of database table definitions\n\n Parameters\n ----------\n id : str\n Unique database identifier\n db : dict\n Dictionary containing database schema information retrieved from\n extract_ms_access_db_schema\n\n Returns\n -------\n object\n Returns pandas data frame of database identifier, table name, list of\n unique indices, and list of table columns\n \"\"\"\n df_table_def = pd.DataFrame()\n\n db_tables = [t for t in db.keys()]\n\n for tab in db_tables:\n new_def = pd.Series(\n {\n \"db_id\": id,\n \"db_table\": tab,\n \"db_table_columns\": tuple(\n [col for col in db[tab][\"column_defs\"].keys()]\n ),\n \"db_table_primary_key\": tuple(\n [\n v\n for k, v in db[tab][\"unique_indices\"].items()\n if (k == \"PrimaryKey\")\n ]\n ),\n }\n )\n df_table_def = pd.concat(\n [df_table_def, new_def.to_frame().T], ignore_index=True\n )\n\n return df_table_def\n","repo_name":"archaeojsc/2022_Database_Migration","sub_path":"db_utilities_extraction.py","file_name":"db_utilities_extraction.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33523604419","text":"from fastapi import status\nfrom httpx import AsyncClient\nfrom tortoise.contrib import test\n\nfrom main import app\nfrom app.api.api_v1 import settings\n\nfrom app.api.api_v1.models.tortoise import Person, Comment, Vote\n\nTORTOISE_TEST_DB = getattr(settings, \"TORTOISE_TEST_DB\", \"sqlite://:memory:\")\nBASE_URL = \"http://127.0.0.1:8000\"\n\n\nclass TestPersonAPi(test.TestCase):\n async def test_root(self):\n async with AsyncClient(app=app, base_url=BASE_URL) as ac:\n response = await ac.get(\"/\")\n assert response.status_code == status.HTTP_200_OK\n assert response.json() == {\n \"detail\": \"Welcome to FastAPI\",\n \"apis\": [\"/api/v1/users\", \"/api/v1/comments\", \"/api/v1/votes\"],\n \"fake_data\": \"/data\",\n \"docs\": [\"/docs\", \"/redoc\"],\n \"openapi\": \"/openapi.json\",\n }\n\n async def test_loading_data(self):\n quantity_data = 4\n # load fake data\n async with AsyncClient(app=app, base_url=BASE_URL) as ac:\n response = await ac.get(\"/data\", params={\"quantity\": quantity_data})\n assert response.status_code == 201\n assert response.json() == {\n \"success\": True,\n \"detail\": \"Data loaded\",\n \"home\": \"/\",\n }\n assert await Person.all().count() == quantity_data\n assert await Comment.all().count() == quantity_data\n assert await Vote.all().count() == quantity_data\n","repo_name":"meschac38700/Test-FastAPI","sub_path":"app/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73734538729","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom convert_image_to_array import convertImageToGSLIBFile, convertImageToHardData\nimport mpslib as mps\nfrom pre_process_data import ContrastStretching\n\n\nclass GenerateImage():\n def __init__(self):\n self.images = ['./imgs/first_scale181a.tif', './imgs/second_scale_1196a.tif']\n # # Choose to compute entropy\n # self.mpslib.par['do_entropy'] = 1\n self.image = []\n self.time = []\n \n def convert_histogram(self):\n for index, i in enumerate(self.images):\n new_image = ContrastStretching(i, str(index + 1) + '_scale')\n new_image_with_3_channels = new_image.add_channels()\n new_image_percentile_stretching = new_image.quantile_transform(new_image_with_3_channels)\n new_image.save_image(new_image_percentile_stretching)\n\n def create_TI_file(self):\n self.ti = './imgs/2_scale.tif'\n self.dat_ti = convertImageToGSLIBFile(self.ti)\n self.original_ti = mps.eas.read('ti.dat')\n \n def configure_MPS_method(self):\n self.first_scale = './imgs/1_scale.tif'\n # Initialize MPSlib using mps_genesim algorithm, and seetings\n self.mpslib = mps.mpslib(method='mps_genesim')\n self.mpslib.par['simulation_grid_size']=np.array([30*8.92, 30*8.92, 1])\n self.mpslib.par['grid_cell_size']=np.array([3.771*10**(-6),3.771*10**(-6),1])\n self.ncond = np.array([i for i in range(0, 100, 20)])\n self.mpslib.par['n_real'] = 1\n self.mpslib.par['ti_fnam'] = './ti.dat'\n self.mpslib.par['n_threads'] = 4\n self.mpslib.d_hard = convertImageToHardData(self.first_scale)\n return self.mpslib\n \n def saveFigure(self):\n fig1 = plt.figure(figsize=(5, 5))\n plt.imshow(np.transpose(np.squeeze(self.original_ti['Dmat'])))\n fig1.savefig('./results_fig/original.png')\n plt.close(fig1)\n for index, ncond in enumerate(self.ncond):\n fig = plt.figure(figsize=(5, 5))\n plt.imshow(np.transpose(np.squeeze(self.image[index])))\n plt.title('CPU time = %.1f' % (self.time[index]) + 's')\n plt.imsave('./results_fig/' + 'Figure_ncond_' + str(ncond) + '.png', np.transpose(np.squeeze(self.image[index])), cmap='gray')\n plt.close(fig)\n\n def generateFigureTime(self):\n fig2 = plt.figure(figsize=(5, 5))\n plt.plot(self.ncond,self.time,'.')\n plt.grid()\n plt.xlabel('n_cond')\n plt.ylabel('simulation time (s)')\n fig2.savefig('./results_fig/' + 'Figure_ncond_time' + '.png')\n plt.close(fig2)\n\n\n def run(self):\n for ncond in self.ncond:\n self.mpslib.par['n_cond'] = ncond\n self.mpslib.run_parallel()\n self.image.append(self.mpslib.sim[-1])\n self.time.append(self.mpslib.time)\n return\n\nif __name__ == \"__main__\":\n image = GenerateImage()\n image.convert_histogram()\n image.create_TI_file()\n image.configure_MPS_method()\n image.run()\n image.saveFigure()\n image.generateFigureTime()\n","repo_name":"ingridbertin/two_scales_fusion_2d","sub_path":"run_parallel.py","file_name":"run_parallel.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30491853370","text":"import urllib.request\nfrom datetime import date, timedelta, datetime as dt\nimport time\n \nurl = \"https://form.office.naver.com/form/responseView.cmd?formkey=MGNjNjIxN2UtZmJiYy00ZmM4LWFlNGUtMTVlMmQ3ZTNkMDVh&sourceId=urlshare\"\n\nserverDate = urllib.request.urlopen(url).headers['Date']\nprint('서버시간: ',serverDate)\n\n#타임스탬프 바꾸기\n#timestamp = (time.mktime(time.strptime(serverDate, '%a, %d %b %Y %H:%M:%S %Z')))\n\ndatetime_server = dt.strptime(serverDate, '%a, %d %b %Y %H:%M:%S %Z')\ndatetime_local = dt.now()\n\nprint((datetime_local.second-datetime_server.second))\n\nwhile True:\n serverDate = urllib.request.urlopen(url).headers['Date']\n datetime_server = dt.strptime(serverDate, '%a, %d %b %Y %H:%M:%S %Z')\n print('서버시간: ',datetime_server)\n time.sleep(0.1)\n\nprint('로컬시간: ',datetime_local)\n\n#print(type(date-serverDate), '초')\n\n","repo_name":"khmdevmem/selenium-python","sub_path":"server-time.py","file_name":"server-time.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39183240721","text":"import sys\nimport os\n\nHOME = os.environ['HOME']\nsys.path.insert(1, HOME + '/github/StreamingSVM')\nfrom comms import Communication\nimport numpy as np\nimport time\n\nclass SendRecvExample:\n comms = Communication.Communication()\n\n def get_data(self,rank):\n if(rank==0):\n return np.array([1,1], dtype='i')\n if (rank == 1):\n return np.array([2, 1], dtype='i')\n if (rank == 2):\n return np.array([3, 1], dtype='i')\n if (rank == 3):\n return np.array([4, 2], dtype='i')\n\n\n def example(self):\n world_rank = self.comms.comm.Get_rank()\n world_size = self.comms.comm.Get_size()\n dsize = 2\n source = 0\n input = np.array([0, 0], dtype='i')\n master_input = np.array([0, 0], dtype='i')\n count = 0\n partner_rank = 1\n dest = -1\n p = np.random.randint(4)\n exec_time = 0\n exec_time -= time.time()\n max_itr = 10000\n for i in range(0,max_itr):\n if (world_rank == 0):\n\n if (world_size == 1):\n dest = world_rank\n else:\n dest = world_rank + 1\n master_input = self.get_data(world_rank)\n # np.asarray(master_input, dtype='i')\n self.comms.send(input=master_input, dtype=self.comms.mpi.INT, dest=dest, tag=0)\n\n if (world_size == 1):\n source = world_rank\n else:\n source = world_size - 1\n\n data = self.comms.recv(source=source, dtype=self.comms.mpi.INT, tag=0, size=dsize)\n #print(\"I am Master \" + str(world_rank) + \", I received from \" +str(source) +\" : \", data)\n\n else:\n source = world_rank - 1\n data = self.comms.recv(source=source, dtype=self.comms.mpi.INT, tag=0, size=dsize)\n #print(\"I am slave \" + str(world_rank) + \", I received from \" +str(source) +\" : \", data)\n data = self.get_data(world_rank)\n #print(\"I am slave \" + str(world_rank) + \", I sent \", data)\n dest = (world_rank + 1) % world_size;\n # np.asarray(data, dtype='i')\n self.comms.send(input=data, dtype=self.comms.mpi.INT, dest=dest, tag=0)\n\n exec_time += time.time()\n if(i==(max_itr-1)):\n if(world_rank==0):\n exec_time = exec_time / 10\n print(\"Execution Time : \", exec_time)\n\n\n def example1(self):\n world_rank = self.comms.comm.Get_rank()\n world_size = self.comms.comm.Get_size()\n dsize = 2\n source = 0\n input = np.array([0, 1, 2, 3, 4], dtype='i')\n master_input = np.array([0, 1, 2, 3, 4], dtype='i')\n count = 0\n partner_rank = 1\n dest = 0\n\n if(world_rank == 0) :\n if(source == 0):\n print('Starting the programme ...', master_input)\n\n if(world_size == 1):\n dest = world_rank\n else:\n dest = world_rank + 1\n master_input = master_input * (world_rank + 1)\n #np.asarray(master_input, dtype='i')\n self.comms.send(input=master_input, dtype=self.comms.mpi.INT, dest=dest, tag=0)\n\n if(world_size == 1):\n source = world_rank\n else:\n source = world_size - 1\n\n data = self.comms.recv(source=source, dtype=self.comms.mpi.INT, tag=0, size=dsize)\n print(\"I am Master \" + str(world_rank) + \", I received : \", data)\n\n else:\n source = world_rank - 1\n data = self.comms.recv(source=source, dtype=self.comms.mpi.INT, tag=0, size=5)\n print(\"I am slave \" + str(world_rank) + \", I received : \", data)\n data = data * world_rank\n print(\"I am slave \" + str(world_rank) + \", I sent \", data)\n dest = (world_rank + 1) % world_size;\n #np.asarray(data, dtype='i')\n self.comms.send(input=data, dtype=self.comms.mpi.INT, dest=dest, tag=0)\n\n\n def example2(self):\n comms = Communication.Communication()\n max_itr = 10000\n rank = self.comms.comm.Get_rank()\n size = self.comms.comm.Get_size()\n\n input = np.array(rank, dtype='i')\n\n print(\"Simple: Input :\" + str(input) 
+ \" From Rank : \" + str(rank))\n\n # initialize the numpy arrays that store the results from reduce operation\n # output_max = np.array(0, 'i')\n exec_time = 0\n exec_time -= time.time()\n for i in range(0, max_itr):\n output_sum = np.array(0, 'i')\n\n # perform reduction based on sum and maximum\n # self.comms.allreduce(input=input, output=output_max, op=self.comms.mpi.MAX, dtype=self.comms.mpi.INT)\n self.comms.allreduce(input=input, output=output_sum, op=self.comms.mpi.SUM, dtype=self.comms.mpi.INT)\n\n if (rank == 0):\n k=1\n # print(\"Simple: Output Max : \" + str(output_max) + \", from Rank \" + str(rank) + \"\\n\")\n #print(\"Simple: Output Sum : \" + str(output_sum) + \", from Rank \" + str(rank) + \"\\n\")\n exec_time += time.time()\n if (i == (max_itr - 1)):\n if (rank == 0):\n exec_time = exec_time / 10\n print(\"Execution Time : \", exec_time)\n\n\n\n\n\nex = SendRecvExample()\nex.example()\n","repo_name":"vibhatha/PSGDSVMPY","sub_path":"examples/comms/RingExample.py","file_name":"RingExample.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"39787882845","text":"from django.contrib import admin\nfrom edc_model_admin.model_admin_audit_fields_mixin import audit_fieldset_tuple\n\nfrom ..admin_site import td_infant_admin\nfrom ..forms import InfantBirthForm\nfrom ..models import InfantBirth\nfrom .model_admin_mixins import ModelAdminMixin\n\n\n@admin.register(InfantBirth, site=td_infant_admin)\nclass InfantBirthAdmin(ModelAdminMixin, admin.ModelAdmin):\n\n form = InfantBirthForm\n\n fieldsets = (\n (None, {\n 'fields': [\n 'subject_identifier',\n 'report_datetime',\n 'first_name',\n 'initials',\n 'dob',\n 'gender']}\n ), audit_fieldset_tuple)\n\n list_display = (\n 'report_datetime',\n 'first_name',\n 'initials',\n 'dob',\n 'gender',\n )\n\n search_fields = ['infant_visit__subject_identifier', ]\n\n list_display = ('report_datetime', 'first_name')\n list_filter = ('gender',)\n radio_fields = {'gender': admin.VERTICAL}\n","repo_name":"tshilo-dikotla/td-infant","sub_path":"td_infant/admin/infant_birth_admin.py","file_name":"infant_birth_admin.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"75336509608","text":"from selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\n# from webdriver_manager.op\n\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC, wait\nimport time\nimport pandas as pd\n\ndriver=webdriver.Chrome(ChromeDriverManager().install())\ndriver.get('https://www.asianpaints.com/catalogue/colour-catalogue.html')\n\na=1\nc_va=[]\nwhile a<9:\n time.sleep(0.3)\n try:\n btn=driver.find_element_by_css_selector(\"button.color-catalogue-revamp-list--loadMoreBtn\")\n driver.execute_script(\"arguments[0].click();\", btn)\n a=a+1\n print(a)\n print('clicked')\n # rgb_value=driver.find_element_by_class_name('color-catalogue-revamp-list--card').value_of_css_property('background-color')\n # c_va.append(rgb_value)\n except NoSuchElementException:\n print('exist')\n break\nrgb_value=driver.find_element_by_class_name('color-catalogue-revamp-list--card').value_of_css_property('background-color')\nprint(rgb_value)\ncolor_name=driver.find_elements_by_class_name('color-catalogue-revamp-list--colorName')\ncolor_code=driver.find_elements_by_class_name('color-catalogue-revamp-list--colorCode')\nprint(c_va)\nsource=driver.page_source\n\nfor i,j in zip(color_name,color_code):\n print(i.text,j.text)\n# job_details_df = pd.DataFrame(jobdetails)\n# job_details_df.columns = ['title', 'company', 'location', 'summary', 'publish_date']\nprint(source)","repo_name":"rj1278/rj","sub_path":"HowIsIt/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1150511632","text":"# подключаемые библиотеки\nimport socket # для работы с сокетами\nimport tkinter as tk # для создания графического интерфейса\nfrom threading import Thread # для работы с потоками\nfrom datetime import datetime # для работы с временем\n\nSERVER_HOST = \"0.0.0.0\" # ip\nSERVER_PORT = 5002 # port\nPASSWORD = \"PASSWORD\" # пароль сервера\n\nMAX_USERNAME_LENGTH = 30 # максимальная длина имени пользователя\n\n\nconnected_users = {} # словарь для подключаемых пользователей, нужен для хранения сокета и имени\ns = socket.socket() # сокет сервера\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #\ns.bind((SERVER_HOST, SERVER_PORT)) # привязывает сервер к определенному IP-адресу и порту\ns.listen(5) # прослушивание входящих сообщений\nprint(f\"Server {SERVER_HOST}:{SERVER_PORT} is running...\") # вывод в консоль текста, что сервер начал работать\n\n\n# основная функция для прослушивания пользователей, открывается потоком для каждого нового пользователя\ndef listen_for_client(client_socket):\n password = client_socket.recv(1024).decode('cp1251') # принимаем пароль от пользователя\n while password != PASSWORD: # если отправлен неправильный пароль, то сообщаем об этом и ждём правильный\n client_socket.send(\"wrong password\".encode()) # отправляет сообщение пользователю, что пароль неправльный\n password = client_socket.recv(1024).decode('cp1251') # принимает пароль от пользователя ещё раз\n name = client_socket.recv(1024).decode('cp1251') # принимает имя от пользователя\n name = name[:MAX_USERNAME_LENGTH] #\n connected_users[client_socket] = name\n update_users_list()\n\n update_chat_text(f\"{name} has joined the chat.\\n\", color=\"green\")\n\n while True: # цикл для отправки сообщения пользователям от других пользователей\n try:\n message = client_socket.recv(1024).decode('cp1251')\n if not message:\n break\n\n if is_private_message(message):\n recipient, message = extract_private_message(message)\n send_private_message(connected_users[client_socket], recipient, message)\n else:\n broadcast_message(connected_users[client_socket], message)\n update_chat_text(f\"{connected_users[client_socket]}: {message}\\n\")\n except ConnectionResetError:\n break\n\n disconnect_client(client_socket)\n\n\n# проверяет приватное ли сообщение\ndef is_private_message(message):\n return message.startswith(\"@\")\n\n\ndef extract_private_message(message):\n recipient, message = message[1:].split(\" \", 1)\n return recipient, message\n\n\ndef send_private_message(sender_name, recipient, message):\n for client_socket, name in connected_users.items():\n if name == recipient:\n client_socket.send(f\"(Private) {sender_name}: {message}\".encode())\n break\n\n\ndef broadcast_message(sender_name, message):\n print(f\"{sender_name}: {message}\")\n for client_socket, name in connected_users.items():\n if name != sender_name:\n client_socket.send(f\"{sender_name}: {message}\".encode())\n else:\n client_socket.send(f\"{name}: {message}\".encode())\n\n\ndef disconnect_client(client_socket):\n name = connected_users[client_socket]\n del connected_users[client_socket]\n client_socket.close()\n print(f\"[-] {name} disconnected\")\n update_users_list()\n update_chat_text(f\"{name} has left the chat. 
\\n\", color=\"red\")\n\n\ndef update_users_list():\n users_list.delete(0, tk.END)\n for name in connected_users.values():\n users_list.insert(tk.END, name)\n\n\ndef get_connected_users():\n return f\"[{', '.join(connected_users.values())}]\"\n\n\ndef send_message(event=None):\n message = entry_message.get()\n if message:\n broadcast_message(\"Server\", message)\n update_chat_text(f\"Server: {message}\\n\")\n entry_message.delete(0, tk.END)\n\n\ndef update_chat_text(text, color=None):\n time_stamp = datetime.now().strftime('%H:%M:%S')\n formatted_text = f\"{time_stamp:>8} | {text}\"\n chat_text.config(state=tk.NORMAL)\n\n if color:\n chat_text.tag_configure(color, foreground=color)\n chat_text.insert(tk.END, formatted_text, color)\n else:\n chat_text.insert(tk.END, formatted_text)\n\n chat_text.see(tk.END)\n chat_text.config(state=tk.DISABLED)\n\n\ndef on_closing():\n s.close()\n root.destroy()\n\n\nroot = tk.Tk()\nroot.title(\"Chat Server\")\nroot.minsize(800, 200)\n\n# Create a grid layout with 3 columns and 2 rows\nroot.columnconfigure(0, weight=1, minsize=200) # First column expands with window width\nroot.columnconfigure(1, weight=1) # Second column expands with window width\nroot.columnconfigure(2, weight=0) # Third column does not expand\nroot.rowconfigure(0, weight=1) # First row expands with window height\nroot.rowconfigure(1, weight=0) # Second row does not expand\n\nusers_frame = tk.Frame(root)\nusers_list = tk.Listbox(users_frame, width=20, bd=0)\nusers_list.pack(fill=tk.BOTH, expand=True, padx=10, pady=(13, 3))\nusers_frame.grid(row=0, column=0, sticky=tk.NSEW)\n\nchat_frame = tk.Frame(root)\nchat_text = tk.Text(chat_frame, state=tk.DISABLED)\nchat_text.pack(fill=tk.BOTH, expand=True, pady=(10, 0))\nchat_frame.grid(row=0, column=1, sticky=tk.NSEW)\n\nentry_message = tk.Entry(root)\nentry_message.grid(row=1, column=1, columnspan=1, sticky=tk.EW)\nentry_message.bind(\"\", send_message)\n\nsend_button = tk.Button(root, text=\"Send\", command=send_message)\nsend_button.grid(row=1, column=2, padx=(0, 3), pady=(0, 5))\n\n# Закрытие окна\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n\n# Запускаем сервер в отдельном потоке\ndef start_server():\n while True:\n client_socket, client_address = s.accept()\n t = Thread(target=listen_for_client, args=(client_socket, ))\n t.daemon = True\n t.start()\n\n\nThread(target=start_server).start()\n\n# Запускаем графический интерфейс\nroot.mainloop()\n","repo_name":"SUAI-K14/S2023-Team1","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6683,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"38705822277","text":"def isSymmetric(root: TreeNode) -> bool:\n if root == None:\n return True\n stack = [root.left, root.right]\n while stack:\n tmp2 = stack.pop()\n tmp1 = stack.pop()\n if not tmp1 and not tmp2:\n continue\n if not tmp1 or not tmp2:\n return False\n if tmp1.val != tmp2.val:\n return False\n stack.append(tmp1.left)\n stack.append(tmp2.right)\n stack.append(tmp1.right)\n stack.append(tmp2.left)\n return True\n","repo_name":"2095831935/LeetCode","sub_path":"101_3.py","file_name":"101_3.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41464271912","text":"from ArraysModel import ArraysModel\nfrom ArraysView import ArraysView\n\n\nclass ArraysPresenter:\n\n def __init__(self):\n self.model = ArraysModel()\n self.array1 = []\n self.array2 = []\n self.array3 = []\n self.view = ArraysView(self)\n self.view.start()\n\n def on_generator_button_pressed(self, button_number: int):\n \"\"\"Если нажата кнопка, генерируем массив и выводим\"\"\"\n if button_number == 1:\n self.array1 = self.model.generate_random_array()\n self.view.change_array(self.array1, 1)\n elif button_number == 2:\n self.array2 = self.model.generate_random_array()\n self.view.change_array(self.array2, 2)\n else:\n self.array3 = self.model.join_arrays(self.array1, self.array2)\n self.view.change_array(self.array3, 3)\n\n def on_reverse_button_pressed(self, k1: int, k2: int):\n \"\"\"Если нажата кнопка переворота, переворачиваем массив 3 и выводим\"\"\"\n try:\n self.model.reverse_array_from_to(self.array3, k1, k2)\n self.view.change_array(self.array3, 3)\n except IndexError:\n self.view.show_error(\"Index out of range\")\n","repo_name":"Mandalore1/SimpleTkinterArraysApp","sub_path":"ArraysPresenter.py","file_name":"ArraysPresenter.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36336827985","text":"import json\n\nfrom django.db import migrations\n\nwith open('recipes/migrations/ingredients.json', 'r', encoding='utf-8') as f:\n ingredients = json.load(f)\n\n\ndef remove_ingredients(apps, schema_editor):\n Ingredient = apps.get_model('recipes', 'Ingredient')\n for ingredient in ingredients:\n Ingredient.objects.get(name=ingredient['name']).delete()\n\n\ndef add_ingredients(apps, schema_editor):\n Ingredient = apps.get_model('recipes', 'Ingredient')\n for ingredient in ingredients:\n new_ingredient = Ingredient(**ingredient)\n new_ingredient.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('recipes', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(\n add_ingredients,\n remove_ingredients\n ),\n ]\n","repo_name":"bigfuto/foodgram","sub_path":"backend/recipes/migrations/0002_add_ingredients.py","file_name":"0002_add_ingredients.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19184839181","text":"from tkinter import *\r\nimport tkinter as tk\r\nfrom PIL import Image, ImageTk\r\nimport sys, csv, math,copy,os\r\nimport time\r\nimport numpy as np\r\nfrom builtins import len\r\nimport cv2\r\nimport tkinter as tk\r\nfrom PIL import Image, ImageTk\r\nimport requests \r\nimport threading \r\nimport random \r\n\r\n\r\n\r\n\r\n \r\n \r\nimport csv, math,copy\r\ndef getSheet(inputFile):\r\n with open(inputFile) as csvfile:\r\n sheet = csv.reader(csvfile, delimiter = ',')\r\n newData = []\r\n for row in sheet:\r\n newData.append(row)\r\n val1 = newData[1][0]\r\n rows, cols = len(newData), len(newData[0])\r\n return newData\r\n\r\n\r\ndef entropy(p):\r\n if abs(p-0)<0.00001:\r\n return 0\r\n else:\r\n return -p*math.log(p,2)\r\n\r\n\r\ndef getMutualInfo(data,splittingFeature):\r\n pos = []\r\n neg = []\r\n N = len(data)-1\r\n index = data[0].index(splittingFeature)\r\n posRef = data[1][index]\r\n label1 = 0\r\n label2 = 0\r\n labelRef = data[1][-1]\r\n for i in range(N):\r\n if data[i+1][index] == posRef:\r\n pos.append(i+1)\r\n if data[i+1][-1] == labelRef:\r\n label1+=1\r\n if data[i+1][index] != posRef:\r\n neg.append(i+1)\r\n if data[i+1][-1] != labelRef:\r\n label2+=1\r\n (p1, p2) = (float(label1)/N, float(label2)/N)\r\n H_Y = entropy(p1)+entropy(p2)\r\n label1_pos = 0\r\n label2_pos = 0\r\n label1_neg = 0\r\n label2_neg = 0\r\n N_pos = len(pos)\r\n N_neg = len(neg)\r\n for i in pos:\r\n if data[i][-1] == labelRef:\r\n label1_pos+=1\r\n else:\r\n label2_pos+=1\r\n for i in neg:\r\n if data[i][-1] == labelRef:\r\n label1_neg+=1\r\n else:\r\n label2_neg+=1\r\n (p1_pos,p2_pos) = (float(label1_pos)/N_pos,float(label2_pos)/N_pos) if N_pos != 0 else (0,0)\r\n (p1_neg,p2_neg) = (float(label1_neg)/N_neg,float(label2_neg)/N_neg) if N_neg != 0 else (0,0)\r\n H_YX = (float(N_pos)/N)*(entropy(p1_pos)+entropy(p2_pos))+(float(N_neg)/N)*(entropy(p1_neg)+entropy(p2_neg))\r\n return (H_Y-H_YX)\r\n\r\ndef partition(data,feature):\r\n posData = [data[0]]\r\n negData = [data[0]]\r\n featIndex = data[0].index(feature)\r\n posResponse = data[1][featIndex]\r\n negResponse = \"\"\r\n for i in range(len(data)-1):\r\n if data[i+1][featIndex]==posResponse:\r\n posData.append(data[i+1])\r\n else:\r\n if negResponse == \"\":\r\n negResponse=data[i+1][featIndex]\r\n negData.append(data[i+1])\r\n return(posData, negData, posResponse, negResponse)\r\n\r\n\r\nclass Tree(object):\r\n def __init__(self,data,labelName=[],leaf=False):\r\n self.lTree=None\r\n self.rTree=None\r\n #all relevant data for next depth\r\n self.data=data\r\n #the name of the two categories of splitting feature\r\n self.splitReference = []\r\n #the names of the two possible outcomes \r\n self.labelName = []\r\n #the number of each of the two outcomes at this level\r\n self.labelCount = []\r\n #the name of the splitting feature\r\n self.splittingFeature=None\r\n #indication of leaf node\r\n self.leaf=leaf\r\n\r\ndef labelNumbers(data,label1,label2):\r\n label1N=0\r\n label2N=0\r\n for i in range(len(data)-1):\r\n if data[i+1][-1]==label1: label1N+=1\r\n else: label2N+=1\r\n return(label1N,label2N)\r\n\r\ndef cancelFeature(data,feature):\r\n \r\n newData=copy.deepcopy(data)\r\n index=data[0].index(feature)\r\n rows=len(data)\r\n for i in range(rows):\r\n newData[i][index:index+1]=[]\r\n \r\n return newData\r\n\r\n\r\ndef printTree(tree,depth):\r\n if tree == None: return\r\n if tree.leaf:\r\n (label1,label2)=(tree.labelName[0],tree.labelName[1])\r\n (N1,N2)=(tree.labelCount[0],tree.labelCount[1])\r\n return \"[%d %s /%d 
%s]\\n\"%(N1,label1,N2,label2)\r\n else:\r\n splittingFeature=tree.splittingFeature\r\n (label1,label2)=(tree.labelName[0],tree.labelName[1])\r\n (N1,N2)=(tree.labelCount[0],tree.labelCount[1])\r\n featPos=tree.splitReference[0]\r\n featNeg=tree.splitReference[1]\r\n s2=\"[%d %s /%d %s]\\n\"%(N1,label1,N2,label2)\r\n s3=\"| \"*(depth+1)+\"%s = %s: \"%(splittingFeature,featPos)\r\n s4=printTree(tree.lTree,depth+1)\r\n s5=\"| \"*(depth+1)+\"%s = %s: \"%(splittingFeature,featNeg)\r\n s6=printTree(tree.rTree,depth+1)\r\n return s2+s3+s4+s5+s6\r\n\r\n\r\n\r\ndef train(data,depth, maxDepth,labelName=[]):\r\n decisionTree=Tree(data)\r\n if labelName==[]:\r\n label1 = data[1][-1]\r\n label2 = \"\"\r\n count = 2\r\n while (label2 == \"\" or label2 == label1) and count<(len(data)):\r\n label2 = data[count][-1]\r\n count+=1\r\n labelName=[label1,label2]\r\n decisionTree.labelName=labelName\r\n (N1,N2)=labelNumbers(data,labelName[0],labelName[1])\r\n labelCount=[0,0]\r\n labelCount[0]=N1\r\n labelCount[1]=N2\r\n decisionTree.labelCount=labelCount\r\n if N1==0 or N2==0:\r\n decisionTree.leaf=True\r\n return decisionTree\r\n if depthmaxI:\r\n maxI = I\r\n maxFeature = i\r\n if maxFeature == \"\":\r\n decisionTree.leaf=True \r\n return decisionTree\r\n (lTreeData, rTreeData, lResponse, rResponse)=partition(data,maxFeature)\r\n newLData=cancelFeature(lTreeData,maxFeature)\r\n newRData=cancelFeature(rTreeData,maxFeature)\r\n decisionTree.splitReference = [lResponse, rResponse]\r\n decisionTree.splittingFeature = maxFeature\r\n if len(newLData)==1 or len(newRData)==1:\r\n decisionTree.leaf=True\r\n return decisionTree\r\n newLTree=train(newLData,depth+1,maxDepth,labelName)\r\n newRTree=train(newRData,depth+1,maxDepth,labelName)\r\n decisionTree.lTree=newLTree\r\n decisionTree.rTree=newRTree\r\n return decisionTree\r\n else:\r\n decisionTree.leaf=True\r\n return decisionTree \r\n\r\n \r\ndef getMajority(tree):\r\n label1=tree.labelName[0]\r\n label2=tree.labelName[1]\r\n N1=tree.labelCount[0]\r\n N2=tree.labelCount[1]\r\n \r\n if N1>=N2:\r\n return label1\r\n else:\r\n return label2 \r\n\r\n \r\n\r\ndef predictOne(tree,data,features):\r\n if tree.leaf:\r\n return getMajority(tree)\r\n else:\r\n keyFeature = tree.splittingFeature\r\n featIndex=features.index(keyFeature)\r\n lReference=tree.splitReference[0]\r\n rReference=tree.splitReference[1]\r\n if data[featIndex] == lReference:\r\n return predictOne(tree.lTree, data, features)\r\n else:\r\n return predictOne(tree.rTree, data, features)\r\n \r\ndef predict(tree,data):\r\n results = []\r\n for i in range(len(data)-1):\r\n results.append(predictOne(tree,data[i+1],data[0]))\r\n return results\r\n\r\ndef getErr(L1, L2):\r\n N = len(L2)\r\n Err = 0\r\n for i in range(N):\r\n if L1[i] != L2[i]:\r\n Err+=1\r\n return float(Err)/N\r\n\r\ndef getLabel(data):\r\n N = len(data)-1\r\n labels = []\r\n for i in range(N):\r\n labels.append(data[i+1][-1])\r\n return labels \r\n\r\n\r\n\r\n\r\n\r\n\r\ndef train_tree():\r\n trainIn = getSheet(\"speechTrain.csv\")\r\n trainLabel = getLabel(trainIn)\r\n depth = 5\r\n decisionTree = train(trainIn,0,depth)\r\n trainOut = predict(decisionTree,trainIn)\r\n trainErr = getErr(trainOut,trainLabel)\r\n outPutTree=printTree(decisionTree,0)\r\n print(outPutTree)\r\n print(\"train error:%f\"%trainErr) \r\n return (decisionTree,outPutTree,trainErr) \r\n\r\n\r\n\r\n\r\n\r\nclass backEndProcess(object):\r\n def __init__ (self):\r\n #a list that stores all sample picture path in the folder\r\n self.pictre_path = []\r\n self.picture=None\r\n 
#api keys\r\n self.emotion_key = \"6e205c33cfde48eb88b1b1870d9957fe\"\r\n self.attention_key = \"c650165254c94a4f9e5cdac99cf0c1fe\"\r\n #length of picture path\r\n self.picture_count = 0\r\n #emotion data from last image for user call\r\n self.recent_emotion_data = None\r\n #attention data from last image for user call\r\n self.recent_attention_data = None\r\n #pre-trained decision tree model\r\n #self.model = decisionTree.trainModel()\r\n #emotion attribute labels\r\n self.population_emotion_data = []\r\n #attention label\r\n self.population_attention_data = []\r\n self.user_requested_emotion = None\r\n\r\n #default attention rate\r\n self.defaultAttentionRate = 56\r\n #margin for attention rate\r\n self.attentionMargin = 1\r\n #error for face finding\r\n self.errorMargin = 10\r\n self.inEmoMode = False\r\n \r\n \r\n \r\n \r\n def attention_data_process(self,faceList):\r\n if faceList == None or len(faceList) == 0:\r\n self.population_attention_data.append(0.23)\r\n return\r\n imageHeight=450\r\n imageWidth=600\r\n yawMargin = 15\r\n pitchMargin = 15\r\n rollMargin = 15\r\n \r\n centerY = imageHeight / 2\r\n centerX = imageWidth / 2\r\n totalAttention = 0\r\n total = len(faceList)\r\n \r\n for faceDict in faceList:\r\n #left,top,pitch,row,yaw\r\n attention = True\r\n left = faceDict[\"faceRectangle\"][\"left\"]\r\n top = faceDict[\"faceRectangle\"][\"top\"]\r\n width = faceDict[\"faceRectangle\"][\"width\"]\r\n height = faceDict[\"faceRectangle\"][\"height\"]\r\n \r\n pitch = faceDict[\"faceAttributes\"][\"headPose\"][\"pitch\"]\r\n roll = faceDict[\"faceAttributes\"][\"headPose\"][\"roll\"]\r\n yaw = faceDict[\"faceAttributes\"][\"headPose\"][\"yaw\"]\r\n print(\"pitch:%d,roll:%d,yaw:%d \\n\" % (pitch,roll,yaw))\r\n \r\n x = left + width / 2\r\n y = top + height / 2\r\n #need to determine angle\r\n if(x>centerX):\r\n #right of the screen\r\n if(abs(yaw) > yawMargin):\r\n attention = False\r\n print(\"wrong yaw1\")\r\n else:\r\n #left of the screen\r\n if(abs(yaw) > yawMargin):\r\n attention = False\r\n print(\"wrong yaw2\")\r\n \r\n print(\"here\")\r\n \r\n \r\n if(y>centerY):\r\n #bottom part of the screen\r\n if(abs(pitch) > pitchMargin):\r\n attention = False\r\n print(\"wrong pitch1\")\r\n else:\r\n #top part of the screen\r\n if(abs(pitch) > pitchMargin):\r\n attention = False\r\n print(\"wrong pitch2\")\r\n \r\n print(\"here too\")\r\n \r\n if(abs(roll) > rollMargin):\r\n attention = False\r\n print(\"wrong roll\")\r\n \r\n print(\"here too too\")\r\n \r\n if(attention):\r\n totalAttention = totalAttention + 1\r\n \r\n attenRate = totalAttention/(total*1.0)\r\n print(attenRate)\r\n self.population_attention_data.append(attenRate)\r\n \r\n def FaceBoundaries(self):\r\n faceList = self.recent_emotion_data\r\n return_list = []\r\n if faceList == None: return []\r\n for faceDict in faceList:\r\n x1=faceDict[\"faceRectangle\"][\"left\"]\r\n y1=faceDict[\"faceRectangle\"][\"top\"]\r\n x2=faceDict[\"faceRectangle\"][\"left\"]+faceDict[\"faceRectangle\"][\"width\"]\r\n y2=faceDict[\"faceRectangle\"][\"top\"]+faceDict[\"faceRectangle\"][\"height\"]\r\n return_list.append((x1*(3.0/4.0)+190,y1*(3.0/4.0)+237,x2*(3.0/4.0)+190,y2*(3.0/4.0)+237))\r\n \r\n \r\n def emotion_detection(self):\r\n emotion_key = self.emotion_key #\"6e205c33cfde48eb88b1b1870d9957fe\"\r\n assert emotion_key\r\n emotion_recognition_url = \"https://westus.api.cognitive.microsoft.com/emotion/v1.0/recognize\"\r\n image_data = self.picture\r\n headers = {'Ocp-Apim-Subscription-Key': emotion_key, \"Content-Type\": 
\"application/octet-stream\" }\r\n response = requests.post(emotion_recognition_url, headers=headers, data=image_data)\r\n response.raise_for_status()\r\n emotion_analysis = response.json()\r\n storage = emotion_analysis\r\n #print(emotion_analysis)\r\n self.emotion_data_process (storage) \r\n\r\n def face_detection(self):\r\n subscription_key = self.attention_key #'c650165254c94a4f9e5cdac99cf0c1fe'\r\n filename = self.picture\r\n uri_base = 'https://westcentralus.api.cognitive.microsoft.com'\r\n headers = {\r\n 'Content-Type': 'application/octet-stream',\r\n 'Ocp-Apim-Subscription-Key': subscription_key,\r\n }\r\n params = {\r\n 'returnFaceId': 'true',\r\n 'returnFaceAttributes': 'headPose',\r\n }\r\n path_to_face_api = '/face/v1.0/detect'\r\n \r\n img_data = filename\r\n try:\r\n response = requests.post(uri_base + path_to_face_api,\r\n data=img_data, \r\n headers=headers,\r\n params=params)\r\n #print ('Response:')\r\n parsed = response.json()\r\n storage = parsed\r\n #print (parsed)\r\n self.attention_data_process(storage)\r\n #print(\"no error !!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n return\r\n # display the image analysis data\r\n #return parsed\r\n except Exception as e:\r\n storage = None\r\n #print(\"000000000000000000000000000000000000000\")\r\n self.attention_data_process(storage)\r\n #print(e)\r\n\r\n def trigger_main(self, path):\r\n self.picture_count+=1\r\n self.picture = open(path,\"rb\").read()\r\n t1 = threading.Thread(target=self.emotion_detection)\r\n t2 = threading.Thread(target=self.face_detection)\r\n t1.start()\r\n t2.start() \r\n\r\n def trigger_curr(self, x,y,front_connection,rect):\r\n t = threading.Thread(target=self.getThreeEmotions, args=(x,y,front_connection,rect))\r\n t.start()\r\n\r\n \r\n \r\n def getThreeEmotions(self,x,y,connection,rect):\r\n faceList = copy.deepcopy(self.recent_emotion_data)\r\n #print(\"00000000000000\")\r\n emotion1 = \"\"\r\n emotion2 = \"\"\r\n emotion3 = \"\"\r\n margin = 20\r\n if (self.recent_emotion_data == None):\r\n return \r\n for faceDict in faceList:\r\n left = faceDict[\"faceRectangle\"][\"left\"]\r\n #print(\"Ok\")\r\n top = faceDict[\"faceRectangle\"][\"top\"]\r\n #print(\"ok\")\r\n width = faceDict[\"faceRectangle\"][\"width\"]\r\n #print(\"ok\")\r\n height = faceDict[\"faceRectangle\"][\"height\"]\r\n #print(\"ok\")\r\n right = left + width\r\n bottom = top + height\r\n #print(\"left:%d top: %d width:%d height:%d x:%d y:%d\"%(left,top,width,height,x,y))\r\n score = faceDict[\"scores\"]\r\n #print(\"000000\")\r\n\r\n #need to determine coordinate\r\n if(x >= (left - margin) and x <= (right + margin) and y <= (bottom + margin) and y >= (top - margin)):\r\n print(\"BullsEye\")\r\n first = 0;\r\n second = 0;\r\n third = 0;\r\n firstString = \"\"\r\n secondString = \"\"\r\n thirdString = \"\"\r\n for j in score:\r\n curr = score[j]\r\n if( curr > first):\r\n third = second\r\n thirdString = secondString\r\n second = first\r\n secondString = firstString\r\n first = curr\r\n firstString = j\r\n elif( curr > second):\r\n third = second\r\n thirdString = secondString\r\n second = curr\r\n secondString = j\r\n elif( curr > third):\r\n third = curr\r\n thirdString = j\r\n emotion1 = firstString\r\n emotion2 = secondString\r\n emotion3 = thirdString\r\n returnX = left\r\n returnY = top\r\n returnH = height\r\n returnW = width \r\n connection[0] = emotion1\r\n connection[1] = emotion2\r\n connection[2] = emotion3\r\n rect[0] = returnX*0.75+190\r\n rect[1] = returnY*0.75+237\r\n rect[2] = returnX*0.75+190+returnW*0.75\r\n rect[3] = 
returnY*0.75+237+returnH*0.75\r\n #print(connection)\r\n\r\n \r\n\r\n\r\n #called by the front end\r\n def getAttentionRateForUser(self):\r\n rateList = self.attention_attri_label\r\n return rateList[-1]\r\n \r\n def emotion_data_process(self,faceList):\r\n if not self.inEmoMode:\r\n self.recent_emotion_data = faceList \r\n emotionList = self.population_emotion_data\r\n if(len(faceList)==0):\r\n emotionList.append([True,True,False])\r\n return\r\n total = len(faceList)\r\n happyCutoff = 0.3\r\n normalCutoff = 0.45\r\n sadCutoff = 0.3\r\n total = len(faceList)\r\n avghappy = 0\r\n avgnormal = 0\r\n avgsad = 0\r\n happyTotal = 0\r\n normalTotal = 0\r\n sadTotal = 0\r\n\r\n happy = False\r\n normal = False\r\n sad = False\r\n for i in range(0,len(faceList)):\r\n #faceList: raw data returned by emotion API\r\n faceDict = faceList[i]\r\n scoreDict = faceDict[\"scores\"]\r\n happyTotal += scoreDict[\"happiness\"]\r\n sadTotal += scoreDict[\"sadness\"]\r\n normalTotal += scoreDict[\"neutral\"]\r\n avgnormal = normalTotal/total\r\n avgsad = sadTotal/total\r\n avghappy = happyTotal/total\r\n if(avghappy>happyCutoff):\r\n happy = True\r\n if(avgsad>sadCutoff):\r\n sad = True\r\n if(avgnormal>normalCutoff):\r\n normal = True\r\n emotionList.append([happy,normal,sad])\r\n \r\ndef rgbString(red, green, blue):\r\n return \"#%02x%02x%02x\" % (red, green, blue)\r\n \r\n\r\n \r\n \r\ndef init(data):\r\n data.treeCounter=0\r\n data.tree,data.treeContent,data.trainError=train_tree()\r\n data.att_rate_window_left = 840-65\r\n data.att_rate_window_top = 107\r\n data.att_rate_window_right = 840+65\r\n data.att_rate_window_bottom = 243\r\n\r\n data.succ_rate_window_left = 843-68\r\n data.succ_rate_window_top = 300\r\n data.succ_rate_window_right = 843+68\r\n data.succ_rate_window_bottom = 436\r\n \r\n data.emo_det_window_left = 720\r\n data.emo_det_window_top = 500\r\n data.emo_det_window_right = 720+240\r\n data.emo_det_window_bottom = 575\r\n \r\n data.counter = 0\r\n data.image_count = 0\r\n data.emotion_count = 0\r\n data.frame_num = 20\r\n data.output_path = \"./\"\r\n \r\n data.cam = cv2.VideoCapture(0)\r\n data.image = NONE\r\n data.tkimage = NONE\r\n data.emoimage = NONE\r\n data.var=random.choice([True,False])\r\n data.att_rate = 0.25\r\n data.att_show = False\r\n data.succ_rate = 0.57\r\n data.in_emo_det = False\r\n data.in_succ_ana = False \r\n data.threeEmotions = [\"\",\"\",\"\"]\r\n data.outLine = [0,0,0,0]\r\n data.emo_x = NONE\r\n data.emo_y = NONE\r\n data.ImgWid = 600\r\n data.ImgHei = 450\r\n data.reduWid = 450\r\n data.redHei = 338\r\n data.att_show=False \r\n \r\n data.spotlight = NONE\r\n \r\n #initiate class backEndProcess\r\n data.backEndProcess = backEndProcess()\r\n \r\n\r\ndef saveImage(data):\r\n filename = \"pic{}.jpg\".format(data.image_count) # construct filename\r\n p = os.path.join(data.output_path, filename) # construct output path\r\n data.image.save(p, \"JPEG\") # save image as jpeg file\r\n #image_to_analyze = data.image\r\n print(\"[INFO] saved {}\".format(filename))\r\n \r\n #change: trigger_main\r\n data.ImgWid,data.ImgHei = data.image.size\r\n data.backEndProcess.trigger_main(p)\r\n \r\n \r\n# def saveEmoImage(data):\r\n# filename = \"emo{}.jpg\".format(data.emotion_count) # construct filename\r\n# p = os.path.join(data.output_path, filename) # construct output path\r\n# data.image.save(p, \"JPEG\") # save image as jpeg file\r\n# print(\"[INFO] saved {}\".format(filename))\r\n\r\n\r\ndef succRatePressed(event,data):\r\n data.in_succ_ana = not data.in_succ_ana\r\n 
data.percentage = (random.random())*0.4+0.6\r\n if not data.in_succ_ana: data.treeCounter=0\r\n \r\n\r\ndef attRatePressed(event,data):\r\n data.att_show = not data.att_show\r\n\r\n\r\ndef emoDetPressed(event,data):\r\n data.in_emo_det = not data.in_emo_det\r\n data.backEndProcess.inEmoMode = False\r\n data.in_emo_ana = False\r\n data.threeEmotions = [\"\",\"\",\"\"]\r\n data.outLine=[0,0,0,0]\r\n if data.in_emo_det:\r\n # data.emotion_count += 1\r\n # saveEmoImage(data)\r\n if data.image_count == 0:\r\n img = data.image\r\n else:\r\n filename = \"pic{}.jpg\".format(data.image_count) # construct filename\r\n p = os.path.join(data.output_path, filename) # construct output path\r\n print(\"[INFO] opened \"+p)\r\n img = Image.open(p)\r\n \r\n #resize image\r\n img = img.resize((450,338),Image.ANTIALIAS)\r\n #converts to tkinter image\r\n tkImg=ImageTk.PhotoImage(image=img)\r\n imageLabel._image_cache=tkImg\r\n data.emoimage = tkImg\r\n \r\n\r\n\r\ndef emoAnaPressed(event, data):\r\n if data.in_emo_det:\r\n data.in_emo_ana = True\r\n data.backEndProcess.inEmoMode = True \r\n data.emo_x = event.x\r\n data.emo_y = event.y \r\n \r\n #change trigger_curr\r\n x = (event.x-190)*(4.0/3.0)#*float(600)/450\r\n y = (event.y-237)*(4.0/3.0)#*float(450)/338\r\n #data.threeEmotions = NONE\r\n data.backEndProcess.trigger_curr(x,y,data.threeEmotions,data.outLine)\r\n #print(\"mouse pressed\")\r\n \r\ndef mousePressed(event, data):\r\n # use event.x and event.y\r\n\r\n \r\n if(data.att_rate_window_left <= event.x and event.x <= data.att_rate_window_right\r\n and data.att_rate_window_top <= event.y and event.y <= data.att_rate_window_bottom):\r\n attRatePressed(event, data)\r\n \r\n if(data.succ_rate_window_left <= event.x and event.x <= data.succ_rate_window_right\r\n and data.succ_rate_window_top <= event.y and event.y <= data.succ_rate_window_bottom):\r\n succRatePressed(event, data)\r\n \r\n if(data.emo_det_window_left <= event.x and event.x <= data.emo_det_window_right\r\n and data.emo_det_window_top <= event.y and event.y <= data.emo_det_window_bottom):\r\n emoDetPressed(event, data)\r\n \r\n if(190 <= event.x and event.x <= 640 \r\n and 237 <= event.y and event.y <= 575):\r\n emoAnaPressed(event, data)\r\n\r\ndef keyPressed(event, data):\r\n # use event.char and event.keysym\r\n pass\r\n\r\ndef updateImage(data):\r\n #gets new frame from webcam feed every time it's called\r\n ret,frame=data.cam.read()\r\n frame=cv2.flip(frame,1)\r\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n img=Image.fromarray(cv2image)\r\n h=450\r\n desiredW=600\r\n img=img.crop((0,0,desiredW,h))\r\n #data.ImgWid,data.ImgHei = img.size\r\n #print(data.ImgWid,data.ImgHei)\r\n data.image=img\r\n #converts to tkinter image\r\n tkImg=ImageTk.PhotoImage(image=img)\r\n imageLabel._image_cache=tkImg\r\n data.tkimage = tkImg\r\n\r\n\r\ndef timerFired(data):\r\n if data.in_succ_ana:\r\n data.treeCounter+=1\r\n #change\r\n if data.counter == 0:\r\n img = Image.open(\"bullshit.png\")\r\n #resize image\r\n img = img.resize((100,85),Image.ANTIALIAS).convert('RGB')\r\n #converts to tkinter image\r\n tkImg=ImageTk.PhotoImage(image=img)\r\n imageLabel._image_cache=tkImg\r\n data.spotlight = tkImg\r\n \r\n #update counter\r\n data.counter += 1\r\n #save image\r\n if data.counter % data.frame_num == 0:\r\n data.image_count += 1\r\n saveImage(data)\r\n #update image\r\n updateImage(data)\r\n #change update att rate\r\n if (len(data.backEndProcess.population_attention_data) != 0):\r\n data.att_rate = 
data.backEndProcess.population_attention_data[-1]\r\n #if (data.counter%10 == 0):\r\n # data.att_rate = random.choice([1,-1])*random.random()*0.01*data.att_rate + data.att_rate\r\n \r\n \r\n \r\ndef drawBackground(canvas,data):\r\n #draws the background around the image \r\n color=rgbString(23,24,20)\r\n rectW,rectH=1200,800\r\n canvas.create_rectangle(0,0,rectW*2,rectH*2,fill=color,width=0)\r\n # if data.spotlight != NONE:\r\n # canvas.create_image(0,0,anchor=NW,image=data.spotlight)\r\n \r\n \r\ndef drawMiddleLine(canvas,data):\r\n color = rgbString(218, 218, 218)\r\n canvas.create_line(680,0,680,750, fill = color, width = 2)\r\n \r\ndef drawSuccRateWindow(canvas,data):\r\n red = int(96 * (data.succ_rate / 1.0))\r\n green = int(202 * (data.succ_rate / 1.0))\r\n blue = int(247 * (data.succ_rate / 1.0))\r\n fillColor=rgbString(red,green,blue)\r\n \r\n color=rgbString(23,24,20)\r\n \r\n textColor=rgbString(225,225,225)\r\n x0=data.succ_rate_window_left\r\n y0=data.succ_rate_window_top\r\n x1=data.succ_rate_window_right\r\n y1=data.succ_rate_window_bottom\r\n r=20\r\n\r\n x2=x0+r\r\n y2=y0+r\r\n x3=x1-r\r\n y3=y1-r\r\n \r\n\r\n canvas.create_arc(x0,y0,x1,y1,outline=\"\",fill=fillColor,style=tk.PIESLICE,start=90,extent=(-data.succ_rate*(359)),width=0)\r\n canvas.create_arc(x2,y2,x3,y3,outline=\"\",fill=color,style=tk.PIESLICE,start=90,extent=(-data.succ_rate*(359)),width=0)\r\n canvas.create_text((x0+x1)/2,(y0+y1)/2,text=\"Success: \"+str(int(data.succ_rate*100))+\"%\",fill=textColor)\r\n\r\n\r\ndef drawAttRateWindow(canvas,data):\r\n \r\n red = int(96 * (data.succ_rate / 1.0))\r\n green =int(202 * (data.succ_rate / 1.0))\r\n blue = int(247 * (data.succ_rate / 1.0))\r\n fillColor=rgbString(red,green,blue)\r\n color=rgbString(23,24,20)\r\n textColor=rgbString(225,225,225)\r\n x0=data.att_rate_window_left\r\n y0=data.att_rate_window_top\r\n x1=data.att_rate_window_right\r\n y1=data.att_rate_window_bottom\r\n r=20\r\n \r\n x2=x0+r\r\n y2=y0+r\r\n x3=x1-r\r\n y3=y1-r\r\n \r\n\r\n canvas.create_arc(x0,y0,x1,y1,outline=\"\",fill=fillColor,style=tk.PIESLICE,start=90,extent=-data.att_rate*359,width=0)\r\n canvas.create_arc(x2,y2,x3,y3,outline=\"\",fill=color,style=tk.PIESLICE,start=90,extent=-data.att_rate*359,width=0)\r\n canvas.create_text((x0+x1)/2,(y0+y1)/2,text=\"Attention: \"+str(int(data.att_rate*100))+\"%\",fill=textColor)\r\n \r\n\r\ndef drawEmoDetWindow(canvas,data):\r\n fillColor=rgbString(108,109,105)\r\n textColor=rgbString(225,225,225)\r\n x0=data.emo_det_window_left\r\n y0=data.emo_det_window_top\r\n x1=data.emo_det_window_right\r\n y1=data.emo_det_window_bottom\r\n r=3\r\n\r\n x2=x0+r\r\n y2=y0\r\n x3=x1-r\r\n y3=y1\r\n canvas.create_rectangle(x2,y2,x3,y3,fill=fillColor,width=0)\r\n\r\n x4=x0\r\n y4=y0+r\r\n x5=x1\r\n y5=y1-r\r\n canvas.create_rectangle(x4,y4,x5,y5,fill=fillColor,width=0)\r\n\r\n x6=x0\r\n y6=y0\r\n x7=x6+2*r\r\n y7=y6+2*r\r\n canvas.create_oval(x6,y6,x7,y7,fill=fillColor,width=0)\r\n\r\n x8=x1-2*r\r\n y8=y0\r\n x9=x8+2*r\r\n y9=y8+2*r\r\n canvas.create_oval(x8,y8,x9,y9,fill=fillColor,width=0)\r\n\r\n x10=x0\r\n y10=y1-2*r\r\n x11=x10+2*r\r\n y11=y10+2*r\r\n canvas.create_oval(x10,y10,x11,y11,fill=fillColor,width=0)\r\n\r\n x12=x1-2*r\r\n y12=y1-2*r\r\n x13=x1\r\n y13=y1\r\n canvas.create_oval(x12,y12,x13,y13,fill=fillColor,width=0)\r\n\r\n canvas.create_text((x0+x1)/2,(y0+y1)/2,text=\"Emotion Detection\",fill=textColor)\r\n \r\n \r\ndef drawVideoWindowNotEmo(canvas,data):\r\n color=rgbString(218, 218, 218)\r\n canvas.create_rectangle(40,125,640,575, fill = color, 
width = 3,outline=\"grey\")\r\n canvas.create_image(40,125,anchor=NW,image=data.tkimage)\r\n \r\n if data.att_show:\r\n face_list = data.backEndProcess.FaceBoundaries()\r\n if face_list==None:return\r\n for n in face_list:\r\n print(\"0000000000000\")\r\n (x0,y0,x1,y1)=n\r\n outline_color = \"green\"\r\n canvas.create_rectangle(x0,y0,x1,y1,outline = outline_color) \r\n \r\ndef predictResult(data):\r\n #high/low concentration \r\n list1 = copy.deepcopy(data.backEndProcess.population_attention_data)\r\n #happy, normal, sad\r\n list2 = copy.deepcopy(data.backEndProcess.population_emotion_data)\r\n new_list=[[\"attention\",\"happy\",\"normal\",\"sad\",\"attentionVariance\",\"Interest\"]]\r\n for i in range(min(len(list1),len(list2))):\r\n new_list.append([list1[i]>=0.5,list2[i][0],list2[i][1],list2[i][2],data.var])\r\n results=predict(data.tree,new_list)\r\n return results \r\n \r\ndef drawTree(canvas,data):\r\n strings=data.treeContent.splitlines()\r\n lines=min(data.treeCounter,len(strings))\r\n textColor = rgbString(96,202,247)\r\n if data.treeCounter>=3*len(strings):\r\n results = predictResult(data)\r\n \r\n percentage =( float(results.count(\"Y\"))/(len(results)))*100\r\n canvas.create_text(200,700,text=\"your success prediction is %f\" %percentage,fill=textColor,font=50)\r\n data.succ_rate = data.percentage\r\n \r\n else:\r\n for i in range(lines):\r\n canvas.create_text(200,800+20*i-data.treeCounter*15,text=strings[i],fill=textColor,font=28)\r\n \r\n\r\n \r\ndef drawVideoWindowEmo(canvas,data):\r\n #resize image\r\n img=data.image.resize((150,112),Image.ANTIALIAS)\r\n #converts to tkinter image\r\n tkImg=ImageTk.PhotoImage(image=img)\r\n imageLabel._image_cache=tkImg\r\n \r\n #draw\r\n color=rgbString(218, 218, 218)\r\n canvas.create_rectangle(40,125,640,575, fill = color, width = 3,outline=\"grey\")\r\n canvas.create_image(40,125,anchor=NW,image=tkImg)\r\n \r\n \r\n \r\ndef drawEmoImgWindow(canvas,data): \r\n #draw\r\n color=rgbString(218, 218, 218)\r\n textColor=rgbString(225,225,225)\r\n canvas.create_rectangle(190,237,640,575, fill = color, width = 3,outline=\"grey\")\r\n canvas.create_image(190,237,anchor=NW,image=data.emoimage)\r\n \r\n #change three emotions\r\n if data.threeEmotions[0] != \"\":\r\n print(\"we are here\")\r\n x = data.emo_x\r\n y = data.emo_y\r\n #print(data.threeEmotions)\r\n canvas.create_text(x+40, y-20, text=data.threeEmotions[0],fill=textColor,font=15)\r\n canvas.create_text(x+40, y, text=data.threeEmotions[1],fill=textColor,font=15)\r\n canvas.create_text(x+40, y+20, text=data.threeEmotions[2],fill=textColor,font=15)\r\n canvas.create_rectangle(data.outLine[0],data.outLine[1],data.outLine[2],data.outLine[3],outline = color,width=3)\r\n \r\n \r\ndef redrawAllEmo(canvas, data):\r\n drawBackground(canvas,data)\r\n drawMiddleLine(canvas,data)\r\n drawSuccRateWindow(canvas,data)\r\n drawAttRateWindow(canvas,data)\r\n drawEmoDetWindow(canvas,data)\r\n drawVideoWindowEmo(canvas,data)\r\n drawEmoImgWindow(canvas,data)\r\n\r\n\r\ndef redrawAllNotEmo(canvas, data):\r\n drawBackground(canvas,data)\r\n drawMiddleLine(canvas,data)\r\n drawSuccRateWindow(canvas,data)\r\n drawAttRateWindow(canvas,data)\r\n drawEmoDetWindow(canvas,data)\r\n drawVideoWindowNotEmo(canvas,data)\r\n \r\n\r\n \r\n \r\n\r\ndef redrawAllSucc(canvas,data):\r\n drawBackground(canvas,data)\r\n drawMiddleLine(canvas,data)\r\n drawSuccRateWindow(canvas,data)\r\n drawAttRateWindow(canvas,data)\r\n drawEmoDetWindow(canvas,data)\r\n drawTree(canvas,data)\r\n 
drawVideoWindowNotEmo(canvas,data)\r\n\r\n\r\ndef redrawAll(canvas, data):\r\n    # draw in canvas\r\n    if data.in_succ_ana:\r\n        redrawAllSucc(canvas,data)\r\n    elif data.in_emo_det:\r\n        redrawAllEmo(canvas, data)\r\n    else:\r\n        redrawAllNotEmo(canvas, data)\r\n    \r\n\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\ndef run(width=1000, height=750):\r\n    global root, imageLabel, canvas\r\n    \r\n    def redrawAllWrapper(canvas, data):\r\n        canvas.delete(ALL)\r\n        canvas.create_rectangle(0, 0, data.width, data.height,\r\n                                fill='white', width=0)\r\n        redrawAll(canvas, data)\r\n        canvas.update()    \r\n\r\n    def mousePressedWrapper(event, canvas, data):\r\n        mousePressed(event, data)\r\n        redrawAllWrapper(canvas, data)\r\n\r\n    def keyPressedWrapper(event, canvas, data):\r\n        keyPressed(event, data)\r\n        redrawAllWrapper(canvas, data)\r\n\r\n    def timerFiredWrapper(canvas, data):\r\n        timerFired(data)\r\n        redrawAllWrapper(canvas, data)\r\n        # pause, then call timerFired again\r\n        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n    # Set up data and call init\r\n    class Struct(object): pass\r\n    data = Struct()\r\n    data.width = width\r\n    data.height = height\r\n    data.timerDelay = 100 # milliseconds\r\n    init(data)\r\n    # create the root and the canvas\r\n    root = tk.Toplevel()\r\n    canvas = Canvas(root, width=data.width, height=data.height)\r\n    canvas.pack()\r\n    imageLabel = tk.Label(root)\r\n    imageLabel.pack()\r\n    # set up events\r\n    root.bind(\"<Button-1>\", lambda event:\r\n                            mousePressedWrapper(event, canvas, data))\r\n    root.bind(\"<Key>\", lambda event:\r\n                            keyPressedWrapper(event, canvas, data))\r\n    timerFiredWrapper(canvas, data)\r\n    # and launch the app\r\n    root.mainloop()  # blocks until window is closed\r\n    print(\"bye!\")\r\n\r\nrun()\r\ncv2.VideoCapture(0).release()\r\n'''\r\ndef test():\r\n    testInstance = backEndProcess()\r\n    testInstance.trigger_main(\"trump.jpg\")\r\n    time.sleep(2)\r\n    print(testInstance.population_emotion_data)\r\n    print(testInstance.population_attention_data)\r\n\r\n\r\n#test()\r\ntree=train_tree()\r\n'''","repo_name":"baijiayu/spotlight","sub_path":"finalsubmission.py","file_name":"finalsubmission.py","file_ext":"py","file_size_in_byte":33995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36114211648","text":"# coding=utf-8\nimport tensorflow as tf\nimport numpy as np\nimport base64\nfrom absl import logging\nimport webdataset as wds\n\n\nif __name__ == \"__main__\":\n import sys, glob, os\n\n record_filename = sys.argv[1]\n output_dir = sys.argv[2]\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n \n \n index = 0\n num_per_dir = 2000\n \n pattern = os.path.join(output_dir, f\"eng_zh-%06d.tar\")\n sink = wds.ShardWriter(pattern, maxsize=int(6e9), maxcount=int(num_per_dir))\n \n all_lines = []\n with open(record_filename, 'r') as f:\n for line in f:\n items = line.strip('\\n').split('\\t')\n text = items[0]\n embedding = \"\\t\".join(items[1:])\n xkey = \"%07d\" % index\n sample = {\n \"__key__\": xkey, \n \"text\": text,\n \"embedding\": embedding\n }\n # Write the sample to the sharded tar archives.\n sink.write(sample)\n index += 1\n \n sink.close()\n \n","repo_name":"saicoco/webdataset","sub_path":"write2tar.py","file_name":"write2tar.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"71497154727","text":"from django.urls import include, path\nfrom rest_framework import routers\n\nfrom transactions import views\n\nrouter = routers.DefaultRouter()\nrouter.register('categories', views.CategoriesViewSet, 'categories')\nrouter.register('transactions', views.TransactionsViewSet, 'accounting')\nrouter.register('report', views.ReportsViewSet, 'report')\n\nurlpatterns = [\n path('', include(router.urls)),\n\n]\n","repo_name":"ipanki/cost_accounting","sub_path":"transactions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25435823561","text":"from random import randint\nfrom time import sleep\nlista = list()\njogos = list()\ntot = 1\nprint('-'*30)\nprint(f'{\"JOGO DA MEGA SENA\":^30}')\nprint('-'*30)\nquant = int(input('Quantos jogos você quer sortear? '))\nwhile tot <= quant:\n cont = 0\n while True:\n num = randint(1, 60)\n if num not in lista:\n lista.append(num)\n cont += 1\n if cont >= 6:\n break\n lista.sort()\n jogos.append(lista[:])\n lista.clear()\n tot += 1\nprint(f'\\n-=-=-= SORTEANDO {quant} JOGOS =-=-=-')\nfor i in range(0, quant):\n sleep(1)\n print(f'Jogo {i+1}: {jogos[i]}')\nprint(f'-=-=-=-=-= BOA SORTE =-=-=-=-=-')\n\n","repo_name":"miradouro/CursoEmVideo-Python","sub_path":"aula018 - LISTAS/ex088-guanabara.py","file_name":"ex088-guanabara.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74940285609","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom models import utils\n\nfrom models.capsNet_distribute import CapsNetDistribute\n\n\nclass CapsNetMultiTasks(CapsNetDistribute):\n\n def __init__(self, cfg, model_arch):\n super(CapsNetMultiTasks, self).__init__(cfg, model_arch)\n self.clf_arch_info = None\n self.rec_arch_info = None\n self.batch_size = cfg.BATCH_SIZE // cfg.GPU_NUMBER // cfg.TASK_NUMBER\n\n @staticmethod\n def _sum_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n\n This function provides a synchronization point across all towers.\n\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the\n gradient calculation for each tower.\n - shape: [[(grad0_gpu0, var0_gpu0), ..., (gradM_gpu0, varM_gpu0)],\n ...,\n [(grad0_gpuN, var0_gpuN), ..., (gradM_gpuN, varM_gpuN)]]\n\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n sum_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Each grad_and_vars looks like:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for grad, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_grad = tf.expand_dims(grad, 0)\n # Append on a 'tower' dimension which we will average over.\n grads.append(expanded_grad)\n\n # grads: [[grad0_gpu0], [grad0_gpu1], ..., [grad0_gpuN]]\n # Sum over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_sum(grad, 0)\n\n # The Variables are redundant because they are shared across towers.\n # So we will just return the first tower's pointer to the Variable.\n v = grad_and_vars[0][1] # varI_gpu0\n grad_and_var = [grad, v]\n sum_grads.append(grad_and_var)\n\n # sum_grads: [[sum_grad0, var0], [sum_grad1, var1], ..., [sum_gradM, varM]]\n return sum_grads\n\n @staticmethod\n def _average_sum_grads(grads_sum, n_tower):\n \"\"\"Calculate the average of sum_gradients.\n\n Args:\n grads_sum: [[sum_grad0, var0], [sum_grad1, var1], ..., [sum_gradM, varM]]\n\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n \"\"\"\n avg_grads = []\n for avg_var in grads_sum:\n avg_grads.append((avg_var[0] / n_tower, avg_var[1]))\n\n # avg_grads: [(avg_grad0, var0), (avg_grad1, var1), ..., (avg_gradM, varM)]\n return avg_grads\n\n def _average_metrics_tower(self, loss_tower, acc_tower, preds_tower,\n clf_loss_tower, rec_loss_tower, rec_images_tower):\n \"\"\"Calculate average of metrics of a tower.\n\n Args:\n loss_tower: final losses of each task, list\n acc_tower: accuracies of each task, list\n preds_tower: predictions of each task, list\n clf_loss_tower: classifier losses of each task, list\n rec_loss_tower: reconstruction losses of each task, list\n rec_images_tower: reconstructed images of each task, list of 4D tensor\n\n Returns:\n tuple of metrics\n \"\"\"\n n_task = float(len(loss_tower))\n\n loss_tower = tf.divide(\n tf.add_n(loss_tower), n_task, name='loss_tower')\n assert loss_tower.get_shape() == ()\n\n acc_tower = tf.divide(\n tf.add_n(acc_tower), n_task, name='acc_tower')\n assert acc_tower.get_shape() == ()\n\n preds_tower = tf.concat(preds_tower, axis=0, name='preds_tower')\n assert preds_tower.get_shape()[0] == \\\n self.cfg.BATCH_SIZE // 
self.cfg.GPU_NUMBER\n\n if self.cfg.WITH_REC:\n clf_loss_tower = tf.divide(\n tf.add_n(clf_loss_tower), n_task, name='clf_loss_tower')\n assert clf_loss_tower.get_shape() == ()\n\n rec_loss_tower = tf.divide(\n tf.add_n(rec_loss_tower), n_task, name='rec_loss_tower')\n assert rec_loss_tower.get_shape() == ()\n\n rec_images_tower = tf.concat(\n rec_images_tower, axis=0, name='rec_images_tower')\n assert rec_images_tower.get_shape().as_list()[0] == \\\n self.cfg.BATCH_SIZE // self.cfg.GPU_NUMBER\n else:\n clf_loss_tower, rec_loss_tower, rec_images_tower = None, None, None\n\n return loss_tower, acc_tower, preds_tower, \\\n clf_loss_tower, rec_loss_tower, rec_images_tower\n\n def _calc_on_gpu(self, gpu_idx, x_tower, y_tower, imgs_tower,\n image_size, is_training, optimizer):\n\n # Split data for each tower\n x_splits_task = tf.split(\n axis=0, num_or_size_splits=self.cfg.TASK_NUMBER, value=x_tower)\n y_splits_task = tf.split(\n axis=0, num_or_size_splits=self.cfg.TASK_NUMBER, value=y_tower)\n imgs_splits_task = tf.split(\n axis=0, num_or_size_splits=self.cfg.TASK_NUMBER, value=imgs_tower)\n\n loss_tower, acc_tower, preds_tower, clf_loss_tower, \\\n rec_loss_tower, rec_images_tower = [], [], [], [], [], []\n # grads_tower = []\n grads_tower_sum = None\n for i in tqdm(range(self.cfg.TASK_NUMBER), ncols=100, unit=' task'):\n\n # Dequeues one task\n x_task, y_task, imgs_task = \\\n x_splits_task[gpu_idx], y_splits_task[gpu_idx], imgs_splits_task[i]\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=bool(i != 0)):\n with tf.name_scope('task_%d' % i):\n # Calculate the loss for one task.\n loss_task, acc_task, preds_task, clf_loss_task, \\\n rec_loss_task, rec_images_task = \\\n self._get_loss(x_task, y_task, imgs_task,\n image_size, is_training=is_training)\n\n # Calculate the gradients on this task.\n grads_task = optimizer.compute_gradients(loss_task)\n\n # Keep track of the gradients across all tasks.\n # grads_tower.append(grads_task)\n\n if i == 0:\n grads_tower_sum = grads_task\n else:\n grads_tower_sum = self._sum_gradients(\n [grads_tower_sum, grads_task])\n\n # Collect metrics of each task\n loss_tower.append(loss_task)\n acc_tower.append(acc_task)\n clf_loss_tower.append(clf_loss_task)\n rec_loss_tower.append(rec_loss_task)\n rec_images_tower.append(rec_images_task)\n preds_tower.append(preds_task)\n\n # Calculate the mean of each gradient.\n # grads_tower = self._average_gradients(grads_tower)\n grads_tower = self._average_sum_grads(\n grads_tower_sum, self.cfg.TASK_NUMBER)\n\n # Calculate means of metrics\n loss_tower, acc_tower, preds_tower, clf_loss_tower, rec_loss_tower, \\\n rec_images_tower = self._average_metrics_tower(\n loss_tower, acc_tower, preds_tower, clf_loss_tower,\n rec_loss_tower, rec_images_tower)\n\n return grads_tower, loss_tower, acc_tower, clf_loss_tower, \\\n rec_loss_tower, rec_images_tower, preds_tower\n\n def build_graph(self,\n input_size=(None, None, None),\n image_size=(None, None, None),\n num_class=None,\n n_train_samples=None):\n \"\"\"Build the graph of CapsNet.\n\n Args:\n input_size: size of input tensor\n image_size: the size of ground truth images, should be 3 dimensional\n num_class: number of class of label\n n_train_samples: number of train samples\n\n Returns:\n tuple of (global_step, train_graph, inputs, labels, train_op,\n saver, summary_op, loss, accuracy, classifier_loss,\n reconstruct_loss, reconstructed_images)\n \"\"\"\n tf.reset_default_graph()\n train_graph = tf.Graph()\n\n with train_graph.as_default(), tf.device('/cpu:0'):\n\n # 
Get inputs tensor\n inputs, labels, input_imgs, is_training = \\\n self._get_inputs(input_size, num_class, image_size=image_size)\n\n # Global step\n global_step = tf.placeholder(tf.int16, name='global_step')\n\n # Optimizer\n optimizer = self._optimizer(self.cfg.OPTIMIZER,\n n_train_samples=n_train_samples,\n global_step=global_step)\n\n # Split data for each tower\n x_splits_tower = tf.split(\n axis=0, num_or_size_splits=self.cfg.GPU_NUMBER, value=inputs)\n y_splits_tower = tf.split(\n axis=0, num_or_size_splits=self.cfg.GPU_NUMBER, value=labels)\n imgs_splits_tower = tf.split(\n axis=0, num_or_size_splits=self.cfg.GPU_NUMBER, value=input_imgs)\n\n # Calculate the gradients for each models tower.\n grads_all, loss_all, acc_all, clf_loss_all, \\\n rec_loss_all, rec_images_all, preds_all = \\\n [], [], [], [], [], [], []\n for i in range(self.cfg.GPU_NUMBER):\n\n utils.thin_line()\n print('Building tower: ', i)\n\n # Dequeues one batch for the GPU\n x_tower, y_tower, imgs_tower = \\\n x_splits_tower[i], y_splits_tower[i], imgs_splits_tower[i]\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=bool(i != 0)):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('tower_%d' % i):\n\n grads_tower, loss_tower, acc_tower, clf_loss_tower, \\\n rec_loss_tower, rec_images_tower, preds_tower = \\\n self._calc_on_gpu(i, x_tower, y_tower, imgs_tower,\n image_size, is_training, optimizer)\n\n # Keep track of the gradients across all towers.\n grads_all.append(grads_tower)\n\n # Collect metrics of each tower\n loss_all.append(loss_tower)\n acc_all.append(acc_tower)\n clf_loss_all.append(clf_loss_tower)\n rec_loss_all.append(rec_loss_tower)\n rec_images_all.append(rec_images_tower)\n preds_all.append(preds_tower)\n\n # Calculate the mean of each gradient.\n grads = self._average_gradients(grads_all)\n\n # Calculate means of metrics\n loss, accuracy, preds, classifier_loss, reconstruct_loss, \\\n reconstructed_images = self._average_metrics(\n loss_all, acc_all, preds_all, clf_loss_all,\n rec_loss_all, rec_images_all)\n\n # Show variables\n utils.thin_line()\n print('Variables: ')\n for v in tf.global_variables():\n print(v)\n\n # Apply the gradients to adjust the shared variables.\n apply_gradient_op = optimizer.apply_gradients(grads)\n\n # Track the moving averages of all trainable variables.\n if self.cfg.MOVING_AVERAGE_DECAY:\n variable_averages = tf.train.ExponentialMovingAverage(\n self.cfg.MOVING_AVERAGE_DECAY)\n variables_averages_op = variable_averages.apply(\n tf.trainable_variables())\n\n # Group all updates to into a single train op.\n train_op = tf.group(apply_gradient_op, variables_averages_op)\n else:\n train_op = apply_gradient_op\n\n # Create a saver.\n saver = tf.train.Saver(tf.global_variables(),\n max_to_keep=self.cfg.MAX_TO_KEEP_CKP)\n\n # Build the summary operation from the last tower summaries.\n tf.summary.scalar('accuracy', accuracy)\n tf.summary.scalar('loss', loss)\n if self.cfg.WITH_REC:\n tf.summary.scalar('clf_loss', classifier_loss)\n tf.summary.scalar('rec_loss', reconstruct_loss)\n summary_op = tf.summary.merge_all()\n\n return global_step, train_graph, inputs, labels, input_imgs, \\\n is_training, train_op, saver, summary_op, loss, accuracy, \\\n classifier_loss, reconstruct_loss, reconstructed_images, preds\n\n\nif __name__ == '__main__':\n\n from baseline_config import config as basel_cfg\n from baseline_arch import basel_arch\n step_, train_graph_, inputs_, labels_, is_training_, \\\n optimizer_, saver_, summary_, loss_, accuracy_, \\\n clf_loss_, rec_loss_, 
rec_images_ = \\\n CapsNetMultiTasks(basel_cfg, basel_arch).build_graph(\n image_size=(28, 28, 1),\n num_class=10)\n","repo_name":"LeanderLXZ/oracle-recognition","sub_path":"src/models/capsNet_multi_tasks.py","file_name":"capsNet_multi_tasks.py","file_ext":"py","file_size_in_byte":12131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8477514763","text":"#!/usr/bin/python3\nimport os\nimport time\nfrom lib import logger\n\n\nclass FileClassificationException(Exception):\n pass\n\n\nclass FileClassificationEngine:\n \"\"\"\n The MediaHandlerClassificationEngine takes lists of absolute file paths and checks every file for different\n classification identifiers. If found, all classification identifiers are correlated to the according file and\n stored in a seperate MediaHandlerClassificationEngineResults-file (first iteration, second iteration = database).\n \"\"\"\n #TODO: Expand classification identification patterns for increased detection- and information return rate.\n #TODO: Add check against MediaHandlerClassificationEngineResults-file to get only new files\n #TODO: Add database to store MediaHandlerCalssificationEngine results (@Lucas)\n\n def __init__(self):\n self.logger = logger()\n self.video_codec_list = ['avi', 'cam', 'mkv', 'mov', 'mpeg', 'mpg', 'mpe', 'svi', 'wmv',]\n self.music_codec_list = ['jpeg', 'png', 'ppm', 'jpg', 'tga']\n self.image_codec_list = []\n self.doc_codec_list = []\n\n\n def classification_engine(self, file_list, type=None):\n \"\"\"\n Extracts all files from a list and classifies them. If type is given, only a dict for the given file type is\n returned\n :param file_list:\n :param type:\n :return:\n \"\"\"\n movie_list = []\n series_list = []\n music_list = []\n doc_list = []\n image_list = []\n\n begin = time.time()\n\n for index, file in enumerate(file_list):\n postfix = file.split('.')[-1]\n filesize = os.stat(file).st_size\n\n if postfix in self.video_codec_list:\n self.logger.info('{} is classified as video with a size of {}'.format(file, filesize))\n\n if postfix == 'mkv' and filesize <= '3000000000':\n media_type = 'series'\n series_list.append(file)\n\n elif postfix == 'mkv':\n media_type = 'movie'\n movie_list.append(file)\n\n elif postfix != 'mkv' and filesize <= '1000000000':\n media_type = 'series'\n series_list.append(file)\n\n else:\n media_type = 'movie'\n movie_list.append(file)\n\n elif postfix in self.music_codec_list:\n self.logger.info('{} is classified as music with a size of {}'.format(file, filesize))\n\n music_list.append(file)\n\n elif postfix in self.image_codec_list:\n self.logger.info('{} is classified as image with a size of {}'.format(file, filesize))\n\n image_list.append(file)\n\n elif postfix in self.doc_codec_list:\n self.logger.info('{} is classified as document with a size of {}'.format(file, filesize))\n\n doc_list.append(file)\n\n else:\n self.logger.alert('Postfix {} of {} is an unknown codec and therefor ignored!'.format(postfix, file))\n\n end = time.time()\n duration = float(end) - float(begin)\n\n self.logger.info('It took {} seconds to process {} different files!'.format(duration, index))\n\n if type == 'movie':\n return movie_list\n elif type == 'series':\n return series_list\n elif type == 'music':\n return music_list\n elif type == 'document':\n return doc_list\n\n return movie_list, series_list, music_list, doc_list\n\nif __name__ == '__main__':\n FileClassificationEngine().init()\n","repo_name":"I777IOmnomnom/wg-repository","sub_path":"archive/iNasQ/src/file_classification_engine.py","file_name":"file_classification_engine.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"75003864167","text":"import math\nimport time\nfrom typing import Dict\n\nimport numpy as np\n\nfrom engine_step import EngineStep, EngineStepConfig\n\n\nclass SubsamplingFilterConfig(EngineStepConfig):\n @staticmethod\n def from_json(json: Dict):\n return SubsamplingFilterConfig(json['samplesPerSec'], json['maxSubsamples'], json['windowLengthSec'])\n\n def __init__(self, samples_per_sec: float = 0, max_subsamples: int = 0, window_length_sec: float = 0):\n super().__init__()\n self.samples_per_sec = samples_per_sec\n self.max_subsamples = max_subsamples\n self.window_length_sec = window_length_sec\n\n\nclass SubsamplingFilter(EngineStep):\n \"\"\"\n `SubsamplingFilter` calculates the minimum and maximum value for each\n `sample_rate` samples, and returns an array `[min1, max1, min2, max2, ...]`\n where `minN`, `maxN` are min and max values for the Nth subsampling window.\n\n This is not a typical subsampling algorithm; this is done to make sure that the\n UI can plot the full range of the signal and not accidentally hide any\n peaks or troughs.\n\n Note that `subsample_rate` doesn't have to be an integer. If it isn't,\n `SubsamplingFilter` tries to periodically include a 'leap' sample in the subsampling\n window to account for the fractional value of `subsample_rate`.\n \"\"\"\n name = 'SubsamplingFilter'\n\n def __init__(self):\n super().__init__()\n\n self.max_subsamples = 0\n self.subsample_rate = 0\n self.config = None\n self.was_reset = False\n\n # This number might not be an integer if the sampling rate is not an integer\n self.leftover_sample_fraction = 0\n self.leftover_samples = np.zeros(0, float)\n\n def configure(self, config: SubsamplingFilterConfig, engine) -> None:\n self.max_subsamples = config.max_subsamples\n self.config = config\n num_samples_in_window = config.samples_per_sec * config.window_length_sec\n self.subsample_rate = 2 * num_samples_in_window / self.max_subsamples\n\n self.was_reset = True\n\n self.leftover_sample_fraction = 0\n self.leftover_samples = np.zeros(0, float)\n\n def do_step(self, data_ndarray):\n if (data_ndarray is None) or (len(data_ndarray) == 0):\n self.result = None\n return\n\n # Note: this can be float.\n samples = np.concatenate((self.leftover_samples, data_ndarray))\n num_samples = len(samples)\n samples_available_for_subsampling = num_samples - self.leftover_sample_fraction\n\n num_subsamples = math.floor(samples_available_for_subsampling / self.subsample_rate)\n total_samples_subsampled = 0\n subsamples = None\n to_sample = 0\n leftover_sample_fraction = self.leftover_sample_fraction\n\n if num_subsamples > 0:\n subsamples = np.zeros(num_subsamples * 2, float)\n from_sample = 0\n\n for i in range(num_subsamples):\n # Note that self.subsample_rate is a float. 
If it has a non-integer value,\n # we need to correctly include an extra 'leap' subsample every once in a while.\n should_include_samples = leftover_sample_fraction + self.subsample_rate\n actual_included_samples = math.floor(should_include_samples)\n to_sample = from_sample + actual_included_samples\n\n min_value = np.amin(samples[from_sample:to_sample])\n max_value = np.amax(samples[from_sample:to_sample])\n\n subsamples[2*i] = min_value\n subsamples[2*i + 1] = max_value\n\n total_samples_subsampled += actual_included_samples\n from_sample = to_sample\n\n # This should be 0 <= x < 1\n leftover_sample_fraction = should_include_samples - actual_included_samples\n\n self.leftover_sample_fraction = leftover_sample_fraction\n self.leftover_samples = samples[to_sample:]\n self.result = subsamples\n\n def after_step(self):\n pass\n","repo_name":"OpenMEA/OpenMEA_Studio","sub_path":"engine/filters/subsampling_filter.py","file_name":"subsampling_filter.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27377382232","text":"import os.path\nimport json\nimport zipfile\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom openpyxl import load_workbook\nimport ukcensusapi.Nomisweb as Api\nimport ukpopulation.utils as utils\n\n\nclass SNPPData:\n \"\"\"\n Functionality for downloading and collating UK Subnational Population Projection (NPP) data\n Nomisweb stores the England data (only)\n Wales/Scotland/NI are not the responsiblity of ONS and are made avilable online by the relevant statistical agency\n \"\"\"\n\n def __init__(self, cache_dir=utils.default_cache_dir()):\n self.cache_dir = cache_dir\n self.data_api = Api.Nomisweb(self.cache_dir)\n\n self.data = {}\n self.data[utils.EN] = self.__do_england()\n self.data[utils.WA] = self.__do_wales()\n self.data[utils.SC] = self.__do_scotland()\n self.data[utils.NI] = self.__do_nireland()\n\n # LADs * 26 years * 91 ages * 2 genders\n # assert len(self.data) == (326+22+32+11) * 26 * 91 * 2\n\n def min_year(self, code):\n \"\"\"\n Returns the first year in the projection, assumes a single LAD or country code\n \"\"\"\n # convert to country if necessary\n if \"0\" in code:\n code = utils.country(code)[0]\n return min(self.data[code].PROJECTED_YEAR_NAME.unique())\n\n def max_year(self, code):\n \"\"\"\n Returns the final year in the projection, assumes a single LAD or country code\n \"\"\"\n # convert to country if necessary\n if \"0\" in code:\n code = utils.country(code)[0]\n return max(self.data[code].PROJECTED_YEAR_NAME.unique())\n\n def all_lads(self, countries):\n \"\"\"\n Returns all the LAD codes in the country or countries specified\n Supports EN WA SC NI EW GB UK\n \"\"\"\n if isinstance(countries, str):\n countries = [countries]\n lads = []\n for country in countries:\n if country in self.data:\n lads.extend(self.data[country].GEOGRAPHY_CODE.unique())\n else:\n # warn if missing or invalid\n print(\"WARNING: no LAD codes for country %s\", country)\n return lads\n\n def filter(self, geog_codes, years=None, ages=range(0, 91), genders=[1, 2]):\n\n # convert inputs to arrays if single values supplied (for isin)\n if isinstance(geog_codes, str):\n geog_codes = [geog_codes]\n\n if np.isscalar(ages):\n ages = [ages]\n\n if np.isscalar(genders):\n genders = [genders]\n\n # Handle problem with empty list not being recognised as Null, was causing problem in utils.trim_range() below\n if not years:\n years = None\n\n countries = utils.country(geog_codes)\n\n # TODO fix incorrect assumption is that all countries have the same year range\n years = utils.trim_range(years, self.min_year(countries[0]), self.max_year(countries[0]))\n\n retval = pd.DataFrame() # {\"GEOGRAPHY_CODE\": [], \"PROJECTED_YEAR_NAME\": [], \"C_AGE\": [], \"GENDER\":[], \"OBS_VALUE\": []})\n # loop over datasets as needed\n for country in countries:\n # apply filters\n retval = retval.append(self.data[country][(self.data[country].GEOGRAPHY_CODE.isin(geog_codes)) &\n (self.data[country].PROJECTED_YEAR_NAME.isin(years)) &\n (self.data[country].C_AGE.isin(ages)) &\n (self.data[country].GENDER.isin(genders))], ignore_index=True,\n sort=False)\n\n # check for any codes requested that werent present (this check is far easier to to on the result)\n invalid_codes = np.setdiff1d(geog_codes, retval.GEOGRAPHY_CODE.unique())\n if len(invalid_codes) > 0:\n raise ValueError(\"Filter for LAD code(s): %s for years %s returned no data (check also age/gender filters)\"\n % (str(invalid_codes), str(years)))\n\n return retval\n\n def aggregate(self, categories, geog_codes, 
years=None, ages=range(0, 91), genders=[1, 2]):\n\n data = self.filter(geog_codes, years, ages, genders)\n\n # invert categories (they're the ones to aggregate, not preserve)\n return data.groupby(utils.check_and_invert(categories))[\"OBS_VALUE\"].sum().reset_index()\n\n # year_range can include year that dont need to be extrapolated\n # Filtering age and gender is not (currently) supported\n def extrapolate(self, npp, geog_codes, year_range):\n\n if isinstance(geog_codes, str):\n geog_codes = [geog_codes]\n\n geog_codes = utils.split_by_country(geog_codes)\n\n all_codes_all_years = pd.DataFrame()\n\n for country in geog_codes:\n if not geog_codes[country]: continue\n\n max_year = self.max_year(country)\n last_year = self.filter(geog_codes[country], max_year)\n\n (in_range, ex_range) = utils.split_range(year_range, max_year)\n # years that dont need to be extrapolated\n all_years = self.filter(geog_codes[country], in_range) if in_range else pd.DataFrame()\n\n for year in ex_range:\n data = last_year.copy()\n scaling = npp.year_ratio(\"ppp\", country, max_year, year)\n data = data.merge(scaling[[\"GENDER\", \"C_AGE\", \"OBS_VALUE\"]], on=[\"GENDER\", \"C_AGE\"])\n data[\"OBS_VALUE\"] = data.OBS_VALUE_x * data.OBS_VALUE_y\n data.PROJECTED_YEAR_NAME = year\n all_years = all_years.append(data.drop([\"OBS_VALUE_x\", \"OBS_VALUE_y\"], axis=1), ignore_index=True,\n sort=False)\n\n all_codes_all_years = all_codes_all_years.append(all_years, ignore_index=True, sort=False)\n\n return all_codes_all_years\n\n def extrapolagg(self, categories, npp, geog_codes, year_range):\n \"\"\"\n Extrapolate and then aggregate\n \"\"\"\n data = self.extrapolate(npp, geog_codes, year_range)\n\n # invert categories (they're the ones to aggregate, not preserve)\n return data.groupby(utils.check_and_invert(categories))[\"OBS_VALUE\"].sum().reset_index()\n\n def create_variant(self, variant_name, npp, geog_codes, year_range):\n \"\"\"\n Apply NPP variant to SNPP: SNPP(v) = SNPP(0) * sum(a,g) [ NPP(v) / NPP(0) ]\n Preserves age-gender structure of SNPP data\n \"\"\"\n result = pd.DataFrame()\n if isinstance(geog_codes, str):\n geog_codes = [geog_codes]\n\n for geog_code in geog_codes:\n\n (pre_range, in_range) = utils.split_range(year_range, npp.min_year() - 1)\n # for any years prior to NPP we just use the SNPP data as-is (i.e. 
\"ppp\")\n pre_data = self.filter(geog_code, pre_range) if pre_range else pd.DataFrame()\n\n if len(pre_data) > 0:\n print(\"WARNING: variant {} not applied for years {} that predate the NPP data\".format(variant_name,\n pre_range))\n\n # return if there's nothing in the NPP range\n if not in_range:\n result.append(pre_data)\n continue\n\n data = self.extrapolate(npp, geog_code, in_range).sort_values(\n [\"C_AGE\", \"GENDER\", \"PROJECTED_YEAR_NAME\"]).reset_index(drop=True)\n\n scaling = npp.variant_ratio(variant_name, utils.country(geog_code), year_range).reset_index().sort_values(\n [\"C_AGE\", \"GENDER\", \"PROJECTED_YEAR_NAME\"])\n # scaling.to_csv(variant_name + \".csv\", index=False)\n\n print(\"DF: \", len(data), \":\", len(scaling))\n assert (len(data) == len(scaling))\n data.OBS_VALUE = data.OBS_VALUE * scaling.OBS_VALUE\n\n # prepend any pre-NPP data\n result = result.append(pre_data.append(data))\n\n return result\n\n def __do_england(self):\n # return self.__do_england_ons() # 2014\n return self.__do_england_nomisweb() # 2018\n\n # nomisweb data is now 2018-based\n def __do_england_nomisweb(self):\n print(\"Collating SNPP data for England...\")\n\n # need to do this in 2 batches as entire table has >1000000 rows\n table_internal = \"NM_2006_1\" # SNPP\n query_params = {\n \"gender\": \"1,2\",\n \"c_age\": \"101...191\",\n \"MEASURES\": \"20100\",\n \"date\": \"latest\", # 2018-based\n \"projected_year\": \"2018...2031\",\n \"select\": \"geography_code,projected_year_name,gender,c_age,obs_value\",\n \"geography\": \"1946157057...1946157382\"\n }\n snpp_e = self.data_api.get_data(table_internal, query_params)\n\n query_params[\"projected_year\"] = \"2032...2043\"\n snpp_e = snpp_e.append(self.data_api.get_data(table_internal, query_params))\n # make age actual year\n snpp_e.C_AGE = snpp_e.C_AGE - 101\n\n # snpp_e[(snpp_e.GEOGRAPHY_CODE==\"E08000021\") & (snpp_e.PROJECTED_YEAR_NAME==2039)].to_csv(\"snpp_ncle_2016.csv\")\n # assert(len(snpp_e) == 26*2*91*326) # 326 LADs x 91 ages x 2 genders x 26 years\n return snpp_e\n\n # Alternative method of downloading the en data from ONS website(Only works with 2014 as it stands).\n def __do_england_ons(self):\n print(\"Collating SNPP data for England...\")\n england_src = \"https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationprojections/datasets/localauthoritiesinenglandz1/2014based/snppz1population.zip\"\n england_raw = self.cache_dir + \"/snpp_e.csv\"\n england_zip = self.cache_dir + \"/snpp_e.zip\"\n\n if os.path.isfile(england_raw):\n snpp_e = pd.read_csv(england_raw)\n else:\n response = requests.get(england_src)\n with open(england_zip, 'wb') as fd:\n for chunk in response.iter_content(chunk_size=1024):\n fd.write(chunk)\n print(\"Downloaded\", england_zip)\n\n z = zipfile.ZipFile(england_zip)\n # print(z.namelist())\n\n snpp_e = pd.DataFrame()\n for gender in [1, 2]:\n filename = \"2014 SNPP Population \" + (\"males\" if gender == 1 else \"females\") + \".csv\"\n chunk = pd.read_csv(z.open(filename)) \\\n .drop([\"AREA_NAME\", \"COMPONENT\", \"SEX\"], axis=1) \\\n .query('AGE_GROUP != \"All ages\"')\n # .AGE_GROUP.replace({\"90 and over\": \"90\"}\n chunk.AGE_GROUP = chunk.AGE_GROUP.replace({\"90 and over\": \"90\"})\n chunk = chunk.melt(id_vars=[\"AREA_CODE\", \"AGE_GROUP\"])\n # chunk = chunk[chunk.AGE_GROUP != \"all ages\"]\n # chunk = chunk.stack().reset_index()\n chunk.columns = [\"GEOGRAPHY_CODE\", \"C_AGE\", \"PROJECTED_YEAR_NAME\", \"OBS_VALUE\"]\n chunk[\"GENDER\"] = gender\n 
snpp_e = snpp_e.append(chunk)\n\n # assert(len(snpp_e) == 26*2*91*326) # 326 districts x 91 ages x 2 genders x 26 years\n snpp_e.to_csv(england_raw, index=False)\n\n # snpp_e[(snpp_e.GEOGRAPHY_CODE==\"E08000021\") & (snpp_e.PROJECTED_YEAR_NAME==2039)].to_csv(\"snpp_ncle_2014.csv\")\n return snpp_e\n\n # Wales\n\n def __do_wales(self):\n print(\"Collating SNPP data for Wales...\")\n cache_dir = utils.default_cache_dir()\n\n wales_raw = cache_dir + \"/snpp_w.csv\"\n if os.path.isfile(wales_raw):\n snpp_w = pd.read_csv(wales_raw)\n else:\n fields = ['Area_AltCode1', 'Year_Code', 'Data', 'Gender_Code', 'Age_Code', 'Area_Hierarchy', 'Variant_Code']\n # StatsWales is an OData endpoint, so select fields of interest\n url = \"http://open.statswales.gov.wales/dataset/popu6010?$select={}\".format(\",\".join(fields))\n # use OData syntax to filter P (persons), AllAges (all ages), Area_Hierarchy 691 (LADs)\n url += \"&$filter=Gender_Code ne 'P' and Area_Hierarchy gt 690 and Area_Hierarchy lt 694 and Variant_Code eq 'Principal'\"\n #\n data = []\n while True:\n print(url)\n r = requests.get(url)\n r_data = r.json()\n data += r_data['value']\n if \"odata.nextLink\" in r_data:\n url = r_data[\"odata.nextLink\"]\n else:\n break\n snpp_w = pd.DataFrame(data)\n\n # Remove unwanted and rename wanted columns\n snpp_w = snpp_w.drop([\"Area_Hierarchy\", \"Variant_Code\"], axis=1)\n snpp_w = snpp_w.rename(columns={\"Age_Code\": \"C_AGE\",\n \"Area_AltCode1\": \"GEOGRAPHY_CODE\",\n \"Data\": \"OBS_VALUE\",\n \"Gender_Code\": \"GENDER\",\n \"Year_Code\": \"PROJECTED_YEAR_NAME\"})\n # Remove all but SYOA and make numeric\n snpp_w = snpp_w[(snpp_w.C_AGE != \"AllAges\") & (snpp_w.C_AGE != \"00To15\") & (snpp_w.C_AGE != \"16To64\") & (\n snpp_w.C_AGE != \"65Plus\")]\n snpp_w.loc[snpp_w.C_AGE == \"90Plus\", \"C_AGE\"] = \"90\"\n snpp_w.C_AGE = pd.to_numeric(snpp_w.C_AGE)\n\n # convert gender to census convention 1=M, 2=F\n snpp_w.GENDER = snpp_w.GENDER.map({\"M\": 1, \"F\": 2})\n\n # assert(len(snpp_w) == 26*2*91*22) # 22 LADs x 91 ages x 2 genders x 26 years\n print(wales_raw)\n snpp_w.to_csv(wales_raw, index=False)\n\n return snpp_w\n\n def __do_scotland(self):\n lookup = {\n 'Aberdeen City': 'S12000033',\n 'Aberdeenshire': 'S12000034',\n 'Angus': 'S12000041',\n 'Argyll & Bute': 'S12000035',\n 'City of Edinburgh': 'S12000036',\n 'Clackmannanshire': 'S12000005',\n 'Dumfries & Galloway': 'S12000006',\n 'Dundee City': 'S12000042',\n 'East Ayrshire': 'S12000008',\n 'East Dunbartonshire': 'S12000045',\n 'East Lothian': 'S12000010',\n 'East Renfrewshire': 'S12000011',\n 'Falkirk': 'S12000014',\n 'Fife': 'S12000015',\n 'Glasgow City': 'S12000046',\n 'Highland': 'S12000017',\n 'Inverclyde': 'S12000018',\n 'Midlothian': 'S12000019',\n 'Moray': 'S12000020',\n 'Na h-Eileanan Siar': 'S12000013',\n 'North Ayrshire': 'S12000021',\n 'North Lanarkshire': 'S12000044',\n 'Orkney Islands': 'S12000023',\n 'Perth & Kinross': 'S12000024',\n 'Renfrewshire': 'S12000038',\n 'Scottish Borders': 'S12000026',\n 'Shetland Islands': 'S12000027',\n 'South Ayrshire': 'S12000028',\n 'South Lanarkshire': 'S12000029',\n 'Stirling': 'S12000030',\n 'West Dunbartonshire': 'S12000039',\n 'West Lothian': 'S12000040'\n }\n\n print(\"Collating SNPP data for Scotland...\")\n\n scotland_raw = self.cache_dir + \"/snpp_s.csv\"\n\n scotland_src = \"https://www.nrscotland.gov.uk/files//statistics/population-projections/sub-national-pp-18/detailed-tables/pop-proj-principal-2018-council-area.zip\"\n scotland_zip = self.cache_dir + \"/snpp_s_2018.zip\"\n\n if 
os.path.isfile(scotland_raw):\n snpp_s = pd.read_csv(scotland_raw)\n else:\n headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0'}\n response = requests.get(scotland_src, headers=headers)\n with open(scotland_zip, 'wb') as fd:\n for chunk in response.iter_content(chunk_size=1024):\n fd.write(chunk)\n print(\"Downloaded\", scotland_zip)\n\n z = zipfile.ZipFile(scotland_zip)\n snpp_s = pd.DataFrame()\n\n for filename in z.namelist():\n council_area = filename[37:-4]\n if council_area in [\"Metadata\", \"Scotland\"]:\n continue\n GEOGRAPHY_CODE = lookup[council_area]\n\n chunk = pd.read_csv(z.open(filename), encoding=\"ISO-8859-1\", header=102)\n # Drop Nan Rows\n chunk = chunk.dropna(axis=0, how=\"all\")\n # Drop Last row with containing Copyright Cell.\n chunk = chunk.drop(chunk.tail(1).index[0])\n\n chunk = chunk.rename(columns={\"Unnamed: 0\": \"C_AGE\"})\n chunk[\"GEOGRAPHY_CODE\"] = GEOGRAPHY_CODE\n chunk[\"GENDER\"] = ''\n\n # Drop rows where C_AGE == \"All Ages\"\n chunk = chunk.drop(chunk.index[chunk[\"C_AGE\"] == \"All ages\"])\n chunk.loc[(chunk.C_AGE == '90 and over'), 'C_AGE'] = 90\n\n chunk = chunk.reset_index(drop=True)\n\n chunk.loc[\n chunk.index[(chunk[\"C_AGE\"] == \"MALES\")][0] + 1:chunk.index[(chunk[\"C_AGE\"] == \"FEMALES\")][0] - 4,\n \"GENDER\"] = 1\n chunk.loc[chunk.index[(chunk[\"C_AGE\"] == \"FEMALES\")][0] + 1:, \"GENDER\"] = 2\n\n chunk = chunk[chunk.GENDER != '']\n\n for year in range(2018, 2044):\n appendable_chunk = chunk[[\"GEOGRAPHY_CODE\", \"C_AGE\", str(year), \"GENDER\"]].rename(\n columns={str(year): \"OBS_VALUE\"})\n appendable_chunk[\"PROJECTED_YEAR_NAME\"] = year\n snpp_s = snpp_s.append(appendable_chunk)\n snpp_s.reset_index(drop=True)\n snpp_s['OBS_VALUE'] = snpp_s['OBS_VALUE'].str.replace(',', '')\n snpp_s['OBS_VALUE'] = pd.to_numeric(snpp_s['OBS_VALUE'])\n snpp_s.to_csv(scotland_raw, index=False)\n\n return snpp_s\n\n def __do_nireland(self):\n # Niron\n # (1 worksheet per LAD equivalent)\n print(\"Collating SNPP data for Northern Ireland...\")\n ni_src = \"https://www.nisra.gov.uk/sites/nisra.gov.uk/files/publications/SNPP16_LGD14_SYA_1641.xlsx\"\n ni_raw = self.cache_dir + \"/snpp_ni.csv\"\n if os.path.isfile(ni_raw):\n snpp_ni = pd.read_csv(ni_raw)\n else:\n response = requests.get(ni_src)\n with open(self.cache_dir + \"/ni_raw.xlsx\", 'wb') as fd:\n for chunk in response.iter_content(chunk_size=1024):\n fd.write(chunk)\n\n # easier to hard-code the worksheet names we need (since unlikely to change frequently)\n districts = [\"Antrim & Newtownabbey\",\n \"Ards & North Down\",\n \"Armagh Banbridge & Craigavon\",\n \"Belfast\",\n \"Causeway Coast & Glens\",\n \"Derry & Strabane\",\n \"Fermanagh & Omagh\",\n \"Lisburn & Castlereagh\",\n \"Mid & East Antrim\",\n \"Mid Ulster\",\n \"Newry Mourne & Down\"]\n\n xls_ni = load_workbook(self.cache_dir + \"/ni_raw.xlsx\", read_only=True)\n\n snpp_ni = pd.DataFrame()\n\n for d in districts:\n # 1 extra row compared to 2014 data (below was A2)\n area_code = xls_ni[d][\"A3\"].value\n # 2 extra rows compared to 2014 data (below was A3:A95)\n males = utils.read_cell_range(xls_ni[d], \"A5\", \"AA97\")\n females = utils.read_cell_range(xls_ni[d], \"A100\", \"AA192\")\n\n dfm = pd.DataFrame(data=males[1:, 1:], index=males[1:, 0], columns=males[0, 1:]).drop(\n [\"Age\"]).stack().reset_index()\n dfm.columns = [\"C_AGE\", \"PROJECTED_YEAR_NAME\", \"OBS_VALUE\"]\n dfm[\"GENDER\"] = pd.Series(1, dfm.index)\n dfm[\"GEOGRAPHY_CODE\"] = pd.Series(area_code, dfm.index)\n 
dfm.loc[dfm.C_AGE == \"90+\", \"C_AGE\"] = \"90\"\n\n dff = pd.DataFrame(data=females[1:, 1:], index=females[1:, 0], columns=females[0, 1:]).drop(\n [\"Age\"]).stack().reset_index()\n dff.columns = [\"C_AGE\", \"PROJECTED_YEAR_NAME\", \"OBS_VALUE\"]\n dff[\"GENDER\"] = pd.Series(2, dff.index)\n dff[\"GEOGRAPHY_CODE\"] = pd.Series(area_code, dff.index)\n dff.loc[dff.C_AGE == \"90+\", \"C_AGE\"] = 90\n\n snpp_ni = snpp_ni.append(dfm)\n snpp_ni = snpp_ni.append(dff)\n\n # assert(len(snpp_ni) == 26*2*91*11) # 11 districts x 91 ages x 2 genders x 26 years\n snpp_ni.to_csv(ni_raw, index=False)\n\n return snpp_ni\n","repo_name":"nismod/ukpopulation","sub_path":"ukpopulation/snppdata.py","file_name":"snppdata.py","file_ext":"py","file_size_in_byte":20642,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"38252619039","text":"print(\"remaining live count in life\")\n# 🚨 Don't change the code below 👇\nage = input(\"What is your current age? \")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\n#change str to int\n\nage_as_int=(int(age))\n\n#how many years we live in earth\n\nyears=(90-age_as_int)\n\n#calculate age into days,weeks months\ndays= (365*years)\nweeks = (years*52)\nmonths = (years*12)\n\nmessge=(f\"You have {years} years, You have {days} days, {weeks} weeks, and {months} months left.\")\n\nprint(messge)\n","repo_name":"pandeeswaran7373/Learnpython","sub_path":"remaining live count in life.py","file_name":"remaining live count in life.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16483955544","text":"from agentspace import Agent,Space\nimport numpy as np\nimport cv2\nimport sys\nsys.path.append('../pyicubsim')\nfrom pyicubsim import iCubApplicationName, iCubRightArm, iCubLeftArm, checkArmConstraints\n\nclass ProprioceptionAgent(Agent):\n\n def init(self):\n iCubApplicationName('/app/imitation')\n self.right_arm = iCubRightArm()\n self.left_arm = iCubLeftArm()\n self.net = cv2.dnn.readNet('vae-iCub-arms-simplified-encoder.pb')\n self.attach_trigger(\"camera\")\n \n def senseSelectAct(self):\n right_joints = self.right_arm.get()\n left_joints = self.left_arm.get()\n joints = right_joints[:5] + left_joints[:5]\n blobs = np.array([joints])/180.0\n self.net.setInput(blobs)\n out = self.net.forward()\n features = tuple(out[0])\n Space.write(\"proprio\",features)\n","repo_name":"andylucny/learningImitation","sub_path":"mirror/ProprioceptionAgent.py","file_name":"ProprioceptionAgent.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30864356543","text":"import yannsa\n\n# EuclideanIndex or CosineIndex \nindex = yannsa.EuclideanIndex()\n\n# Init data could use init_data_fvecs or init_data_embedding\n# The format of fvec is the same with http://corpus-texmex.irisa.fr \n# The format of embedding is the same with word2vec \n\n# Data stored are key-vector pairs, and fvecs does not provided the key, so that int-id starting from 0 is used as the key.\nindex.init_data_fvecs(\"sift_base.fvecs\")\n\nindex.build(k=20, join_k=40, lambda_value=0.2, refine_iter_num=20)\nindex.save_index(\"python_index\")\n\n","repo_name":"shallyan/Yannsa","sub_path":"python_binding/example/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"18738568223","text":"\"\"\"Class for loading stock data\"\"\"\nimport yfinance as yf\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\n\n\nclass StockData:\n \"\"\"\n \n \"\"\"\n def __init__(self, src_filename):\n self.src_filename = src_filename\n self.portfolio = dict()\n self.stock_data = pd.read_csv(self.src_filename, header=2, decimal=',')\n self.stock_data = self.stock_data[(self.stock_data['Anlageklasse'] == 'Aktien')]\n self.sectors = self.stock_data['Sektor'].unique()\n\n def pick_rand_stock_for_sector(self):\n \"\"\"picks random stock from the index\"\"\"\n for i, sector in enumerate(self.sectors):\n print(f\"\\nSektor: {sector}\\n\")\n stocks = self.stock_data[(self.stock_data['Sektor'] == sector)]\n rand_number = np.random.randint(0, len(stocks))\n print(f\"Random index number: {rand_number}\")\n print(f\"Number of stocks in the sector: {len(stocks)}\")\n\n print(stocks[['Name', 'Marktwert']].iloc[rand_number])\n\n def pick_top_k_stock_for_sector(self, k=1):\n \"\"\"This method prints top k market cap weighted stocks per sector\"\"\"\n for sector in self.sectors:\n # print(f\"\\nSektor: {sector}\\n\")\n stocks = self.stock_data[(self.stock_data['Sektor'] == sector)][:k]\n\n # print(stocks[['Name', 'Marktwert']])\n \n # add the stocks from the sector to the portfolio\n self.portfolio[sector] = list(stocks['Name'].values)\n # self.portfolio.append(list(stocks['Name'].values))","repo_name":"fjeske/stockAnalyzer","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25651636881","text":"from django.shortcuts import render\nfrom .models import Item, Order, OrderItem, BillingAddress, Payment, Coupon, Refund\nfrom .forms import CheckoutForm, CouponForm,RefundForm\nfrom django.views.generic import ListView, DetailView, View\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.utils import timezone\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.conf import settings\nimport stripe\nstripe.api_key = settings.STRIPE_SECRET_KEY\nimport random\nimport string\n# `source` is obtained with Stripe.js; see https://stripe.com/docs/payments/accept-a-payment-charges#web-create-token\n\n# Create your views here.\n\ndef create_ref_code():\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))\n\nclass HomeView(ListView):\n model = Item\n paginate_by = 10\n template_name = \"home-page.html\"\n\nclass CheckoutView(LoginRequiredMixin, View):\n def get(self, *args, **kwargs):\n try:\n order = Order.objects.get(user=self.request.user, ordered=False)\n form = CheckoutForm()\n context = {\n 'form': form,\n 'couponform': CouponForm(),\n 'order': order,\n 'DISPLAY_COUPON_FORM': True\n }\n return render(self.request, \"checkout-page.html\", context)\n except ObjectDoesNotExist:\n messages.info(self.request, \"You don't have an active order\") \n return redirect(\"my_app:checkout\")\n \n\n def post(self, *args, **kwargs):\n form = CheckoutForm(self.request.POST or None)\n try:\n order = Order.objects.get(user=self.request.user, ordered=False)\n if form.is_valid():\n street_address = form.cleaned_data.get('street_address')\n apartment_address = form.cleaned_data.get('apartment_address')\n country = form.cleaned_data.get('country')\n zipp = form.cleaned_data.get('zipp')\n # same_billing_address = form.cleaned_data.get('same_billing_address')\n # save_info = form.cleaned_data.get('save_info')\n payment_option = form.cleaned_data.get('payment_option')\n billing_address = BillingAddress(\n user = self.request.user,\n street_address = street_address,\n apartment_address = apartment_address,\n country = country,\n zipp = zipp, \n )\n billing_address.save()\n order.billing_address = billing_address\n order.save()\n\n if payment_option == 'S': \n return redirect('my_app:payment', payment_option='stripe')\n elif payment_option == 'S': \n return redirect('my_app:payment', payment_option='stripe')\n else:\n messages.warning(self.request, \"Invalid payment option selected\")\n return redirect('my_app:checkout')\n except ObjectDoesNotExist:\n messages.warning(request, \"You don not have an active order\")\n return redirect(\"my_app:order-summary\")\n \n\nclass PaymentView(View):\n def get(self, *args, **kwargs):\n order = Order.objects.get(user=self.request.user, ordered=False)\n if order.billing_address:\n context = {\n 'order': order,\n 'DISPLAY_COUPON_FORM': False\n }\n return render(self.request, \"payment.html\", context)\n else:\n messages.warning(request, \"You have not added a billing address\")\n return redirect(\"my_app:checkout\") \n\n def post(self, *args, **kwargs):\n print(\"girdiii\")\n order = Order.objects.get(user=self.request.user, ordered=False)\n token = self.request.POST.get('stripeToken')\n amount = int(order.get_total() * 100) \n\n try:\n charge = stripe.Charge.create(\n amount=amount,\n currency=\"usd\",\n source=token,\n description=\"Charge 
for \" + self.request.user.username,\n )\n payment = Payment()\n payment.stripe_charge_id = charge['id']\n payment.user = self.request.user\n payment.amount = order.get_total()\n payment.save()\n\n order_items = order.items.all()\n order_items.update(ordered=True)\n for item in order_items:\n item.save()\n\n \n order.ordered = True\n order.payment = payment\n order.ref_code = create_ref_code()\n order.save()\n\n messages.success(self.request, \"Successful Payment\")\n return redirect(\"/\")\n\n\n except stripe.error.CardError as e:\n body = e.json_body\n err = body.get('error', {})\n messages.warning(self.request, \"Rate Limit Error\")\n return redirect(\"/\")\n\n # Since it's a decline, stripe.error.CardError will be caught\n\n except stripe.error.RateLimitError as e:\n # Too many requests made to the API too quickly\n messages.warning(self.request, \"Invalid parameters\")\n return redirect(\"/\")\n\n except stripe.error.InvalidRequestError as e:\n # Invalid parameters were supplied to Stripe's API\n messages.warning(self.request, \"Invalid Request\")\n return redirect(\"/\")\n\n except stripe.error.AuthenticationError as e:\n # Authentication with Stripe's API failed\n # (maybe you changed API keys recently)\n messages.warning(self.request, \"Not authenticated\")\n return redirect(\"/\")\n\n except stripe.error.APIConnectionError as e:\n # Network communication with Stripe failed\n messages.warning(self.request, \"Network Error\")\n return redirect(\"/\")\n\n except stripe.error.StripeError as e:\n # Display a very generic error to the user, and maybe send\n # yourself an email\n messages.warning(self.request, \"Something went wrong\")\n return redirect(\"/\")\n\n except Exception as e:\n # Something else happened, completely unrelated to Stripe\n messages.warning(self.request, \"Something went terribly wrong\")\n return redirect(\"/\")\n\n \n\nclass OrderSummaryView(LoginRequiredMixin, View):\n def get(self, *args, **kwargs):\n try:\n order = Order.objects.get(user=self.request.user, ordered=False)\n context = {\n 'object': order\n }\n return render(self.request, 'order_summary.html', context)\n except ObjectDoesNotExist:\n messages.warning(self.request, \"You don not have an active order\")\n return redirect(\"/\")\n\nclass ItemDetailView(DetailView):\n model = Item\n template_name = \"product-page.html\"\n\n# class ProdcuctDetailView(DetailView):\n# model = Item\n# template_name = \"product-page.html\"\n\ndef item_list(request):\n context = {\n \"items\": Item.objects.all()\n }\n return render(request, \"item_list.html\", context)\n\ndef products(request):\n return render(request, \"product-page.html\") \n\n@login_required\ndef add_to_cart(request, slug):\n item = get_object_or_404(Item, slug=slug)\n order_item, created = OrderItem.objects.get_or_create(\n item=item,\n user=request.user,\n ordered=False\n )\n order_qs = Order.objects.filter(user=request.user, ordered=False)\n if order_qs.exists():\n order = order_qs[0]\n if order.items.filter(item__slug=item.slug).exists():\n order_item.quantity += 1\n order_item.save()\n messages.info(request, \"This item qunatity was updated\")\n return redirect(\"my_app:product\", slug=slug)\n else:\n messages.info(request, \"This item was added to your cart\")\n order.items.add(order_item)\n return redirect(\"my_app:product\", slug=slug)\n\n else:\n ordered_date = timezone.now()\n order = Order.objects.create(user=request.user, ordered_date=ordered_date)\n order.items.add(order_item)\n messages.info(request, \"This item was added to your cart\")\n 
return redirect(\"my_app:product\", slug=slug)\n\n@login_required\ndef remove_from_cart(request, slug): \n full_path = \"http://\" + request.META.get('REMOTE_ADDR') + \":\" + request.META['SERVER_PORT'] + \"/order-summary/\" \n requested_path = request.META.get('HTTP_REFERER')\n item = get_object_or_404(Item, slug=slug)\n order_qs = Order.objects.filter(\n user=request.user,\n ordered=False \n )\n if order_qs.exists():\n order = order_qs[0]\n if order.items.filter(item__slug=item.slug).exists():\n order_item = OrderItem.objects.filter(\n item=item,\n user=request.user,\n ordered=False\n )[0] \n order.items.remove(order_item)\n messages.info(request, \"This item was removed from your cart\") \n if requested_path == full_path:\n return redirect(\"my_app:order-summary\") \n return redirect(\"my_app:product\", slug=slug)\n else:\n messages.info(request, \"This item was not in your cart\")\n return redirect(\"my_app:product\", slug=slug)\n else:\n messages.info(request, \"You don't have an active order\")\n return redirect(\"my_app:product\", slug=slug)\n\n@login_required\ndef remove_single_item_from_cart(request, slug):\n item = get_object_or_404(Item, slug=slug)\n order_qs = Order.objects.filter(\n user=request.user,\n ordered=False)\n if order_qs.exists():\n order = order_qs[0]\n if order.items.filter(item__slug=item.slug).exists():\n order_item = OrderItem.objects.filter(\n item=item,\n user=request.user,\n ordered=False\n )[0]\n if order_item.quantity > 1:\n order_item.quantity -= 1\n order_item.save()\n else:\n order.items.remove(order_item)\n messages.info(request, \"This item quantity was updated\")\n return redirect(\"my_app:order-summary\")\n else:\n messages.info(request, \"This item was not in your cart\")\n return redirect(\"my_app:product\", slug=slug)\n else:\n messages.info(request, \"You don't have an active order\")\n return redirect(\"my_app:product\", slug=slug)\n\n@login_required\ndef add_single_item_to_cart(request, slug):\n item = get_object_or_404(Item, slug=slug)\n order_qs = Order.objects.filter(\n user=request.user,\n ordered=False)\n if order_qs.exists():\n order = order_qs[0]\n if order.items.filter(item__slug=item.slug).exists():\n order_item = OrderItem.objects.filter(\n item=item,\n user=request.user,\n ordered=False\n )[0]\n order_item.quantity += 1\n order_item.save()\n messages.info(request, \"This item quantity was updated\")\n return redirect(\"my_app:order-summary\")\n else:\n messages.info(request, \"This item was not in your cart\")\n return redirect(\"my_app:order-summaryt\")\n else:\n messages.info(request, \"You don't have an active order\")\n return redirect(\"my_app:order-summary\")\n\n\ndef get_coupon(request, code):\n try:\n coupon = Coupon.objects.get(code=code)\n return coupon\n except ObjectDoesNotExist:\n messages.info(request, \"This is not a valid coupon code\") \n return redirect(\"my_app:checkout\")\n\nclass AddCouponView(View):\n def post(self, *args, **kwargs):\n form = CouponForm(request.POST or None)\n if form.is_valid():\n try:\n code = form.cleaned_data.get('code')\n order = Order.objects.get(user=request.user, ordered=False)\n order.coupon = get_coupon(request, code) \n order.save()\n messages.success(request, \"Successful\") \n return redirect(\"my_app:checkout\")\n except ObjectDoesNotExist:\n messages.info(request, \"You don't have an active order\") \n return redirect(\"my_app:checkout\")\n\nclass RequestRefundView(View):\n def get(self, *args, **kwargs):\n form = RefundForm()\n context = {\n 'form': form\n }\n return 
render(self.request, \"request_refund.html\", context)\n\n def post(self, *args, **kwargs):\n form = RefundForm(self.request.POST or None)\n if form.is_valid():\n ref_code = form.cleaned_data.get('ref_code')\n message = form.cleaned_data.get('message')\n email = form.cleaned_data.get('email')\n try:\n order = Order.objects.get(ref_code=ref_code)\n order.refund_requested = True\n order.save()\n\n refund = Refund()\n refund.order = order\n refund.reason = message\n refund.email = email\n refund.save() \n\n messages.info(self.request, \"Your request was recieved\")\n return redirect(\"my_app:request-refund\")\n\n except ObjectDoesNotExist:\n messages.info(self.request, \"This order does not exist\")\n return redirect(\"my_app:request-refund\")","repo_name":"ogulcandulger/e_commerce_w-Django","sub_path":"my_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12892367076","text":"import numpy as np\nfrom flask import Flask, jsonify, make_response\nfrom flask_restplus import Resource, Api, fields\nfrom flask import request, stream_with_context, Response\nfrom flask_cors import CORS\nimport json, csv\nfrom werkzeug.utils import cached_property\nimport pickle\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\nimport yfinance as yf\nimport pandas_datareader as pdr\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM\n\napp = Flask(__name__)\nCORS(app)\napi = Api(app)\n\nname = api.model('name', {\n 'name_id': fields.String(description='Enter Name', required=True, example='ABCD')\n })\n\nmodel_new = keras.models.load_model('my_model2')\n\n# @app.route('/')\n# def home():\n# return render_template('index.html')\n\n@api.route('/prediction')\nclass LargeNew(Resource):\n @api.expect(name)\n def post(self):\n data = request.json\n name_id = data['name_id']\n print(name_id)\n import pandas_datareader as pdr\n import pandas as pd\n msft = yf.Ticker(name_id)\n df_ori = msft.history('10y',interval='1d')\n df=df_ori.reset_index()\n df=df[0:2461]\n df.tail(3)\n\n df1=df[['Date','Close']]\n df1['Date']=pd.DatetimeIndex(df1['Date'])\n df1['Date2']=pd.DatetimeIndex(df1['Date']).date\n\n\n print(df1.shape)\n\n df1.head(3)\n\n del df1['Date']\n df1=df1.set_index('Date2')\n print(df1.shape)\n\n import pandas as pd\n# df=pd.read_csv('AAPL.csv')\n\n# df1=df.reset_index()['close']\n df=df1.copy()\n train = df[0:1800]\n valid = df[1800:]\n #creating dataframe\n # training_size=int(len(df1)*0.65)\n # test_size=len(df1)-training_size\n # train_data,test_data=df1[0:training_size,:],df1[training_size:len(df1),:1]\n # print('train_data',train_data.shape)\n # print('test_data',test_data.shape)\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n valid_scaled_data = scaler.fit_transform(valid)\n \n\n x_input=valid_scaled_data[valid_scaled_data.shape[0]-100:].reshape(1,-1)\n x_input.shape\n \n temp_input=list(x_input)\n temp_input=temp_input[0].tolist()\n print('tempinput',len(temp_input))\n # demonstrate prediction for next 10 days\n # demonstrate prediction for next 10 days\n from numpy import array\n\n lst_output=[]\n n_steps=100\n i=0\n while(i<7):\n\n if(len(temp_input)>100):\n #print(temp_input)\n x_input=np.array(temp_input[1:])\n print(\"{} day input {}\".format(i,x_input))\n x_input=x_input.reshape(1,-1)\n x_input = x_input.reshape((1, n_steps, 1))\n #print(x_input)\n yhat = model_new.predict(x_input, verbose=0)\n print(\"{} day output {}\".format(i,yhat))\n temp_input.extend(yhat[0].tolist())\n temp_input=temp_input[1:]\n #print(temp_input)\n lst_output.extend(yhat.tolist())\n i=i+1\n else:\n x_input = x_input.reshape((1, n_steps,1))\n yhat = model_new.predict(x_input, verbose=0)\n print(yhat[0])\n temp_input.extend(yhat[0].tolist())\n print(len(temp_input))\n lst_output.extend(yhat.tolist())\n i=i+1\n\n\n # print(lst_output)\n # day_new=np.arange(1,101)\n # day_pred=np.arange(101,131)\n # df3=df1.tolist()\n # df3.extend(lst_output)\n\n # # df3=scaler.inverse_transform(df3).tolist()\n # df3=scaler.inverse_transform(df3)\n # pred=pd.DataFrame(df3)\n # pred['Time']='2020-11-12'\n # pred.columns=['pred','timestamp']\n # print(pred.head(3))\n # print('out',json.dumps(pred.to_dict(orient='records')))\n # # pred.head(3)\n # response = make_response(json.dumps(pred.to_dict(orient='records')))\n # # 
response.headers['content-type'] = 'application/octet-stream'\n # return response\n \n# plt.plot(day_pred,scaler.inverse_transform(lst_output),color='blue')\n # df1['Date']=pd.DatetimeIndex(df1['Date'])\n print('output',len(lst_output))\n last=df1.index[-1]\n\n future_dates=pd.date_range(start=last, periods=7)\n future_dataset=pd.DataFrame()\n future_dataset=pd.DataFrame()\n future_dataset['date']=future_dates\n future_dataset['Close']=scaler.inverse_transform(lst_output)\n\n future_dataset['date']=pd.DatetimeIndex(future_dataset['date'])\n future_dataset['date']=pd.DatetimeIndex(future_dataset['date']).date\n future_dataset['weekday']=pd.DatetimeIndex(future_dataset['date']).weekday\n future_dataset=future_dataset[future_dataset['weekday'].isin([0,1,2,3,4])]\n future_dataset['date']=future_dataset['date'].astype('str')\n del future_dataset['weekday']\n\n future_dataset\n print(future_dataset.shape)\n print(future_dataset.head(3))\n print('out',json.dumps(future_dataset.to_dict(orient='records')))\n response = make_response(json.dumps(future_dataset.to_dict(orient='records')))\n response.headers['content-type'] = 'application/octet-stream'\n return response\n \n\nif __name__ == \"__main__\":\n app.run(port=6002,debug=True)\n","repo_name":"rozytaker/flask_del","sub_path":"stock_product_noretrain.py","file_name":"stock_product_noretrain.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20250202957","text":"# 3 amigos jogam na loteria, caso ganhem o premio deve ser repartido proporcionalmente.\n# Ler qnt cada um investiu, o valor do premio e imprimir quanto cada um ganharia\npremio = float(input('Digite o valor do prêmio: R$ '))\namg1 = float(input('Quanto o primeiro amigo apostou? R$ '))\namg2 = float(input('Quanto o segundo amigo apostou? R$ '))\namg3 = float(input('Quanto o terceiro amigo apostou? R$ '))\n\ntotal = amg1 + amg2 + amg3\nporc_amg1 = ((100 * amg1) / total) / 100\nporc_amg2 = ((100 * amg2) / total) / 100\nporc_amg3 = ((100 * amg3) / total) / 100\n\nprint(f'O primeiro amigo ganha R$ {(porc_amg1 * premio):.2f}\\n'\n f'O segundo amigo ganha R$ {(porc_amg2 * premio):.2f}\\n'\n f'O terceiro amigo ganha R$ {(porc_amg3 * premio):.2f}')\n","repo_name":"leandro-alvesc/estudos_python","sub_path":"guppe/exercicios/secao_04/ex052.py","file_name":"ex052.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"13319210898","text":"import os\nimport time\nfrom mmdet.apis import inference_detector, init_detector\n\nupload_dir = '/Users/cyy/cv_exhibition/upload'\nresult_dir = '/Users/cyy/cv_exhibition/result'\nconfig = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\ncheckpoint = 'faster_rcnn.pth'\nmodel = init_detector(config, checkpoint, device='cpu')\n\ndef single_pic(upload,result):\n try:\n res = inference_detector(model, upload)\n model.show_result(upload,res,out_file=result)\n except:\n os.mknod(result)\n\n\ndef scan_dirs():\n uploaded = set()\n for file in os.listdir(upload_dir):\n if not os.path.isdir(file):\n uploaded.add(file)\n processed = set()\n for file in os.listdir(result_dir):\n if not os.path.isdir(file):\n processed.add(file)\n need_to_do = uploaded - processed\n for x in need_to_do:\n single_pic(upload_dir+'/'+x,result_dir+'/'+x)\n \nwhile True:\n scan_dirs()\n time.sleep(1)\n ","repo_name":"cyyself/cv_exhibition","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"18299245449","text":"#!/usr/bin/env python3\n\"\"\"\ncreate a list of (1000) ids for testing\n\noutputs a plain text file with JSON formatting\n\tdue to arduino file limitations (8+3)\n\ncan be run via python(3) or $ chmod +x id_gen.test.py && ./id_gen...\n\"\"\"\nimport sys\nimport random\n\nwith open(\"ids.txt\", \"w\", encoding=\"text/plain\") as sys.stdout:\n ACC = []\n for i in range(1000):\n NUM = round(random.randint(1, 1000000000))\n NUM_STR = str(NUM)\n while len(NUM_STR) < 10:\n NUM_STR = \"0\" + NUM_STR\n ACC.append(NUM_STR)\n print({\"url\": \"www.example.com\", \"ids\": ACC})\n","repo_name":"JoshMLeslie/arduino","sub_path":"60ar_teensy/id_gen.test.py","file_name":"id_gen.test.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27808317542","text":"#!/usr/bin/python\nimport matplotlib.pyplot as plt\nimport dtw.dtw\nimport numpy as np\nfrom matplotlib.ticker import FormatStrFormatter\n\n\"\"\"\nThis script will show a dynamic time warping 'heat-path' plot.\nThis is a simple example with sin and cosine. The sine wave is also\nplotted on the top and the cosine on the left.\n\"\"\"\n\nx = np.linspace(0, 2*np.pi, 201)\nsin_x = np.sin(x).reshape(-1, 1)\ncos_x = np.cos(x).reshape(-1, 1)\n\ndist, cost, acc, path = dtw.dtw(sin_x, cos_x, dist=lambda x, y: np.linalg.norm(x - y, ord=1))\n\n# definitions for the axes\nleft, width = 0.12, 0.60\nbottom, height = 0.08, 0.60\nbottom_h = 0.16 + width\nleft_h = left + 0.27\nrect_plot = [left_h, bottom, width, height]\nrect_x = [left_h, bottom_h, width, 0.2]\nrect_y = [left, bottom, 0.2, height]\n\n\n# start with a rectangular Figure\nfig = plt.figure(2, figsize=(8, 8))\nfig.suptitle(\"sin(x) Vs. cos(x) Dynamic Time warping\", fontsize=14)\n\naxplot = plt.axes(rect_plot)\naxx = plt.axes(rect_x)\naxy = plt.axes(rect_y)\n\n# Plot the matrix\naxplot.pcolor(acc.T, cmap='nipy_spectral')\naxplot.plot(path[0], path[1], 'w')\n\n\naxplot.set_xlim((0, len(x)))\naxplot.set_ylim((0, len(sin_x)))\naxplot.tick_params(axis='both', which='major', labelsize=12)\n\n# Plot time serie horizontal\naxx.plot(sin_x, '.', color='k')\naxx.tick_params(axis='both', which='major', labelsize=12)\nxloc = plt.MaxNLocator(4)\nx2Formatter = FormatStrFormatter('%d')\naxx.yaxis.set_major_locator(xloc)\naxx.yaxis.set_major_formatter(x2Formatter)\n\n# Plot time serie vertical\naxy.plot(np.cos(x), (100.0 / np.pi) * x, '.', color='k')\naxy.invert_xaxis()\nyloc = plt.MaxNLocator(4)\nxFormatter = FormatStrFormatter('%d')\naxy.xaxis.set_major_locator(yloc)\naxy.xaxis.set_major_formatter(xFormatter)\naxy.tick_params(axis='both', which='major', labelsize=18)\n\n#Limits\naxx.set_xlim(axplot.get_xlim())\n\naxy.set_ylim(axplot.get_ylim())\nplt.xlabel(\"cos(x)\")\nplt.ylabel(\"x\")\n\nplt.show()\n","repo_name":"Wubuntu88/BirdCallClassification","sub_path":"DTW/dtw_plotting_test.py","file_name":"dtw_plotting_test.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13162117517","text":"from middleLayer import middleLayer\r\nimport pymongo\r\nfrom datetime import datetime\r\nfrom getpass import getpass\r\nfrom pprint import pprint\r\nfrom neo4j import GraphDatabase\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport re\r\n\r\nclass frontEnd:\r\n \r\n def __init__(self):\r\n \r\n self.firstLevelCommands=[\"!browseNations\",\"!findHotel\",\"!findReviewer\",\"!reccomendedHotelForReviewer\",\"!deleteHotel\",\"!deleteReviewer\",\"!deleteReview\",\"!commands\",\"!login\",\"!logout\",\"!quit\"]\r\n self.firstLevelDescriptions=[\": read all the nation\",\": search an hotel and read its informations\",\": search a reviewer and read its information\",\": read the suggested hotel for a reviewer\",\": delete an hotel\",\": delete a reviewer\",\": delete the review whose number is in the list of the reviews you've read\",\": read all the commands\",\": login in with your credentials\",\"\",\": exit from the application\"]\r\n self.secondLevelDescription=[ \": show the statistics of a nation of the list\",\": show the analytics of a nation of the list\",\": read the cities of a nation\",\": show the most popular hotels of the nation\",\": show the most popular reviewers of the nation\",\": show a list of possible fake reviewers\",\": delete a nation(only if you're an admin)\",\": read all the commands\",\": login in with your credentials\",\"\",\": return to the main level\",\": exit from the application\"] \r\n self.secondLevelCommands=[\"!showStatistics\",\"!showAnalytics\",\"!browseCities\",\"!popularHotels\",\"!popularReviewers\",\"!fakeReviewers\",\"!delete\",\"!commands\",\"!login\",\"!logout\",\"!back\",\"!quit\"] \r\n self.thirdLevelCommands=[\"!showStatistics\",\"!showAnalytics\",\"!popularHotels\",\"!popularReviewers\",\"!delete\",\"!commands\",\"!login\",\"!logout\",\"!back\",\"!quit\"]\r\n self.thirdLevelDescription=[ \": show the statistics of a city of the list\",\": show the analytics of a city of the list\",\": show the most popular hotels of the city\",\": show the most popular reviewers of the nation\",\": delete a city(only if you're an admin)\",\": read all the commands\",\": login in with your credentials\",\"\",\": return to the nation level\",\": exit from the application\"] \r\n self.typeUser=\"generic\"\r\n self.level=\"first\"\r\n self.middleLayer=middleLayer()\r\n \r\n def showCommands(self):\r\n commands=[]\r\n descriptions=[]\r\n if(self.level==\"first\"):\r\n commands=self.firstLevelCommands\r\n descriptions=self.firstLevelDescriptions\r\n elif(self.level==\"second\"):\r\n commands=self.secondLevelCommands\r\n descriptions=self.secondLevelDescription\r\n else:\r\n commands=self.thirdLevelCommands\r\n descriptions=self.thirdLevelDescription\r\n numberOption=len(commands)\r\n index=0;\r\n while index `has not been identified yet.`\")\n\t\telse:\n\t\t\tself.db.remove_user_to_guild(user_id, guild_id)\n\t\t\tserver = self.client.get_guild(int(guild_id))\n\t\t\tmember = server.get_member(int(user_id))\n\t\t\tawait self.clean_star_roles(member,server)\n\t\t\tawait ctx.send(f\"<@{user_id}> `has been removed.`\")\n\n\n\n\n\t## Remove any kind of star roles\n\tasync def clean_star_roles(self,member,server):\n\t\troles = member.roles\n\t\tfor r in roles:\n\t\t\trole = get(server.roles, name=r.name)\n\t\t\tif str(role).find(\"★\")!=-1:\n\t\t\t\tawait member.remove_roles(role)\n\n\n\n\t## Set a star role for a user\n\tasync def set_star(self,member,server,rating):\n\t\trole = get(server.roles, 
name=discord_commons.getStars(rating))\n\t\tawait member.add_roles(role)\n\n\n\t## Make an embed message for a indentified user\n\tdef make_handle_embed(self,member,username,data):\n\t\tdesc = f\"**Handle for <@{member.id}> succesfully set to [{username}](https://www.codechef.com/users/{username})**\"\n\t\tcolour = discord_commons.getDiscordColourByRating(int(data['rating']))\n\t\tembed = discord.Embed(description=desc, color=colour)\n\t\tembed.add_field(name='Rating', value=data['rating'], inline=True)\n\t\tembed.add_field(name='Stars', value=discord_commons.getStars(data['rating']), inline=True)\n\t\tembed.set_thumbnail(url=data['profile_pic'])\n\t\treturn embed\n\n\n\t## Add an identified user to database and send out embed confirmation\n\tasync def identify_user(self,ctx,user_id,guild_id,username,data=None):\n\t\tif data == None:\n\t\t\tdata = cc_commons.getUserData(username)\n\t\tself.db.add_user_to_guild(user_id, guild_id, username)\n\t\tif self.db.fetch_cc_user(username)==None:\n\t\t\tcc_commons.add_cc_user_easy(username, data, self.db)\n\t\telse:\n\t\t\tcc_commons.update_cc_user_easy(username, data, self.db)\n\t\tserver = self.client.get_guild(int(guild_id))\n\t\tmember = server.get_member(int(user_id))\n\t\tawait self.clean_star_roles(member,server)\n\t\tawait self.set_star(member,server,data['rating'])\n\t\tprofile_embed= self.make_handle_embed(member,username,data)\n\t\tawait ctx.send(embed=profile_embed)\n\t\t\n\n\n\n\t## List all users of a server\n\t@handle.command(brief='Check users in server')\n\t@commands.cooldown(1, COOLDOWN-5, commands.BucketType.user)\n\tasync def list(self,ctx):\n\t\t\"\"\"Check Users in Server'\"\"\"\n\t\tres = self.db.fetch_guild_users(str(ctx.message.guild.id))\n\t\tif res==None:\n\t\t\tawait ctx.send(\"```No user has registered their handle```\")\n\t\t\treturn \n\n\t\tres2 = []\n\t\tfor x in res:\n\t\t\ttry:\n\t\t\t\tres2.append([x,self.db.fetch_cc_user(x['cchandle'])['rating'],ctx.guild.get_member(int(x['user_id']))])\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"Exception at =handle list\",e)\n\t\tres=res2\n\t\tres= sorted(res, key = lambda x: x[1], reverse=True)\n\n\t\tstyle = table.Style('{:>} {:<} {:<} {:<} {:<}')\n\t\tt = table.Table(style)\n\t\tt += table.Header('#', 'Name', 'Handle', 'Rating','Stars')\n\t\tt += table.Line()\n\t\tidx=1\n\t\tfor x in res:\n\t\t\ttry:\n\t\t\t\tif(x[2]==None):\n\t\t\t\t\tcontinue\n\t\t\t\tif x[2].nick!=None:\n\t\t\t\t\tt += table.Data(idx, x[2].nick, x[0]['cchandle'], x[1],discord_commons.getStars(x[1]))\n\t\t\t\telse:\n\t\t\t\t\tt += table.Data(idx, x[2].name, x[0]['cchandle'], x[1],discord_commons.getStars(x[1]))\n\t\t\t\tidx+=1\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e,\" at handle list\")\n\t\t\t\n\t\thandle_list = '```\\n'+str(t)+'\\n```'\n\t\tembed = discord.Embed(title='Handles of server members',description=handle_list,color=discord_commons.getRandomColour())\n\t\tawait ctx.send(embed=embed)\n\n\n\n\n\t## Role update\n\t\n\t@handle.command(brief='Update roles on a server')\n\t@commands.cooldown(1, COOLDOWN, commands.BucketType.user)\n\tasync def role_update(self,ctx):\n\t\tif str(ctx.author.id) != str(constants.OWNER):\n\t\t\tawait ctx.send(constants.NON_OWNER_MSG)\n\t\t\treturn \n\t\tres = self.db.fetch_distinct_active_handles()\n\t\tif res==None:\n\t\t\tawait ctx.send(\"```No users for role update```\")\n\t\t\treturn \n\t\tcurrent_time = int(time.time())\n\t\tfor username in res:\n\t\t\tcurrent_user_data = self.db.fetch_cc_user(username)\n\t\t\texpiry_time = 
int(current_user_data['lastupdated'])+constants.USERDATA_UPDATE_COOLDOWN\n\t\t\tif current_time > expiry_time:\n\t\t\t\tcurrent_user_data = cc_commons.getUserData(username)\n\t\t\t\tself.db.update_cc_user(username, current_user_data['name'], current_user_data['profile_pic'], current_user_data['rating'], current_user_data['rating_data'],current_user_data['solved_problems'],current_user_data['submission_stats'])\t\t\t\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tprint(f\"Skipping {username}\")\n\t\tres = self.db.fetch_active_handles()\n\t\tfor x in res:\n\t\t\tguild_id = x['guild_id']\n\t\t\tuser_id = x['user_id']\n\t\t\tusername = x['username']\n\t\t\trating = self.db.fetch_cc_user(username)['rating']\n\t\t\tserver = self.client.get_guild(int(guild_id))\n\t\t\tmember = server.get_member(int(user_id))\n\t\t\tawait self.clean_star_roles(member,server)\n\t\t\tawait self.set_star(member,server,rating)\n\n\t@identify.error\n\t@list.error\n\t@role_update.error\n\tasync def contest_error(self,ctx, error):\n\t\tif isinstance(error, commands.CommandOnCooldown):\n\t\t\tawait ctx.send(f'```This command is on cooldown, you can use it in {round(error.retry_after, 2)} seconds```')\ndef setup(client):\n\tclient.add_cog(Users(client))","repo_name":"s-i-d-d-i-s/Sparky-Bot","sub_path":"cogs/Users.py","file_name":"Users.py","file_ext":"py","file_size_in_byte":8133,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"}
+{"seq_id":"6625210389","text":"import math\r\n\r\na, b = map(int, input().split())\r\nnum = [True] * (b-a+1)\r\n\r\nfor i in range(2,int(math.sqrt(b))+1):\r\n temp = i*i\r\n while temp <= b:\r\n for j in range(int(a/temp) * temp , b+1 , temp):\r\n if j < a:\r\n continue\r\n if num[j-a]:\r\n num[j-a] = False\r\n temp*=i\r\n\r\nprint(num.count(True))","repo_name":"LucaWinds/SolvedBaekjoonProblem","sub_path":"백준/Gold/1016. 제곱 ㄴㄴ 수/제곱 ㄴㄴ 수.py","file_name":"제곱 ㄴㄴ 수.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19109086093","text":"import logging\nimport os\nfrom urllib.parse import urlparse\n\nfrom keeper_secrets_manager_core.configkeys import ConfigKeys\nfrom keeper_secrets_manager_core.keeper_globals import keeper_servers, logger_name\nfrom keeper_secrets_manager_core.storage import KeyValueStorage\n\n\ndef get_server(code_server, config_store: KeyValueStorage):\n\n env_server = os.getenv('KSM_HOSTNAME')\n\n if env_server:\n server_to_use = env_server\n elif config_store.get(ConfigKeys.KEY_HOSTNAME):\n server_to_use = config_store.get(ConfigKeys.KEY_HOSTNAME)\n elif code_server:\n server_to_use = code_server\n else:\n server_to_use = keeper_servers.get('US')\n\n if server_to_use in keeper_servers:\n # Server key was supplied\n server_to_return = keeper_servers.get(server_to_use)\n else:\n # Looks like an URL\n # Un-parsing URL to get only domain:\n if 'http' not in server_to_use:\n server_to_use = 'https://%s' % server_to_use\n\n server_to_return = urlparse(server_to_use).netloc\n\n logging.getLogger(logger_name).debug(\"Keeper hostname %s\" % server_to_return)\n\n return server_to_return\n","repo_name":"inna-btc/secrets-manager","sub_path":"sdk/python/core/keeper_secrets_manager_core/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26290447318","text":"import paho.mqtt.client as mqtt\nimport time\n\ndef play_done_callback(client, userdata, message):\n global play_done\n play_done = True\n print(\"play_done\")\n\ndef rec_done_callback(client, userdata, message):\n global rec_done\n rec_done = True\n print(\"rec_done\")\n\ndef on_message(client, userdata, message):\n print(msg.topic+\" \"+str(msg.payload))\n\ndef Input_parsing(dist, direction, LoS, edist, edirection, location, top, test, duration):\n\n args = \"\"\n if test:\n args = \"{} -t\".format(args)\n\n args = \"{} --distance {}\".format(args, dist)\n args = \"{} --direction {}\".format(args, direction)\n\n if not LoS:\n args = \"{} --LoS\".format(args)\n args = \"{} --edistance {}\".format(args, edist)\n args = \"{} --edirection {}\".format(args, edirection)\n\n if top: \n args = \"{} --top\".format(args)\n\n args = \"{} --location {}\".format(args, location)\n args = \"{} --duration {}\".format(args, duration)\n\n return args\n\n\n\n# User inputs start\n\n# (Geodesic) souce location\ndist = 50 #cm\ndirection = 0\n\n# Line-of-Sight state\nLoS = True\n\n# (Euclidian) source location (only send if LoS == False)\n\nedist = 0 #cm\nedirection = 0\n\n\n# Meta lobation of recording\nlocation = \"H2-IC02\"\n\n# Top state (if ther's an obustructio inbetween the mics)\ntop = True\n\n# Testing flag (set to True to run trough program without actually playing/recording\ntest = False\n\n# Music_files\n# File names\nif test:\n\tM = 2\n\tchirp_types = [\"0s024\"]\nelse:\n\tM = 8\n\tchirp_types = [\"0s024\", \"0s048\"]\n\nmusic_names = []\nfor j in range(len(chirp_types)):\n for i in range(M):\n music_names.append('chirp_train_chirp_{}_{}'.format(chirp_types[j],i))\n \n# Length of the music files (seconds)\nif test:\n\tduration = 2\nelse:\n\tduration = 30\n\n# User inputs end\nmsg = Input_parsing(dist, direction, LoS, edist, edirection, location, top, test, duration)\n\n\nrec_init = False\nplay_init = False\n\nrec_done = rec_init\nplay_done = play_init\n\n#connect to mqtt\nclient = mqtt.Client()\n\nclient.connect(\"Robomindpi-002\")\n\nclient.subscribe(\"rec_done\")\nclient.subscribe(\"play_done\")\n\nclient.message_callback_add(\"rec_done\", rec_done_callback)\nclient.message_callback_add(\"play_done\", play_done_callback)\n\n# loop\nclient.loop_start()\nprint(\"playrec settings \\n{}\\n\".format(msg))\nfor i in range(len(music_names)):\n print(music_names[i])\n \n rec_done = rec_init\n play_done = play_init\n \n tx_args = \"{} --music {}\".format(msg,music_names[i])\n client.publish(\"playrec\", tx_args)\n\n while not (rec_done and play_done):\n pass\n \n time.sleep(1)\n print()\n\nprint(\"done\")\nprint(\"playrec settings \\n{}\\n\".format(msg))\nclient.loop_stop()\n","repo_name":"amjadmajid/aud","sub_path":"Multi_PI_recording/Playrec_control.py","file_name":"Playrec_control.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"21859979724","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nfrom datetime import datetime\n\n\n\"\"\"::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nFOR DIAGNOSTICS TAB DATA\n::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\"\"\"\n\ndef return_engagements(df):\n # tweet count, user count, engagements\n rts = int(df[df[\"type\"] != \"retweet\"]['rtcount'].sum())\n fvs = int(df['fvcount'].sum())\n desc_js = {\"tweets\": len(df), \"users\": df.username.nunique(),\n \"engagements\": rts+fvs}\n dfu = df.groupby([\"username\"], sort=False)[\"flcount\"].max().reset_index()\n rh = dfu['flcount'].sum() + dfu.username.nunique()\n desc_js['reach'] = int(rh)\n return desc_js\n\ndef return_timeline(df):\n # timeline\n tl = pd.DataFrame(df.dateofposting)\n tl['datehour'] = [i.replace(microsecond=0,second=0) for i in tl.dateofposting]\n tl = tl.groupby(['datehour'], as_index=False).count().reset_index()\n del tl['index']\n tl[\"int\"] = [1000*(t.replace(tzinfo=None)-datetime(1970,1,1)).total_seconds() for t in tl.datehour]\n tl.to_csv(\"tl.csv\")\n chartdata = {'x': tl[\"int\"],\n 'name': 'Volume', 'y1': tl['dateofposting'], 'kwargs1': { 'color': '#ef6c00' }\n }\n return chartdata\n\ndef return_composition(df):\n data = dict(df.type.value_counts())\n return data\n\ndef return_source(df):\n src = dict(df.source.value_counts())\n src_ = {}\n src_[\"Web Client\"] = src[\"Twitter Web Client\"]\n src_[\"Android\"] = src[\"Twitter for Android\"]\n src_[\"iPhone\"] = src[\"Twitter for iPhone\"]\n src_[\"Others\"] = sum(src.values()) - src_[\"Web Client\"] - src_[\"Android\"] - src_[\"iPhone\"]\n return src_\n\ndef return_geocode(df):\n data = {}\n for i in df.coordinates:\n if i != None:\n data[len(data)] = {\"lat\": i[1], \"long\": i[0]}\n return data\n\n\n\n\"\"\"::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nFOR INFLUENCERS TAB DATA\n::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\"\"\"\n\n# set value for filter_ to engagements and flcount only\ndef return_influencers(df, filter_):\n cols = [\"name\", \"username\", \"rtcount\", \"fvcount\", \"type\", \"profileimage\", \"flcount\"]\n df = df[cols].reset_index()\n del df['index']\n df = df[df['type'] != 'retweet']\n postcount = df.groupby(['username']).count().reset_index()[['username', 'profileimage']]\n postcount.columns = ['username', 'postcount']\n df = df.groupby(['name', 'username', 'flcount', 'profileimage'], sort=False)['rtcount', 'fvcount'].sum().reset_index()\n df = pd.merge(df, postcount, how='left', on=['username'])\n df['engagements'] = df['rtcount'] + df['fvcount']\n df = df.sort_values(filter_, ascending=False).head(10).reset_index()\n data = {}\n for i in range(5):\n data[i] = {\"name\": df['name'][i], \"username\": \"@%s\" % df['username'][i], \"profileimage\": df['profileimage'][i].replace('_normal',''), \"post\": df['postcount'][i], \"favorites\": df['fvcount'][i], \"retweets\": df['rtcount'][i], \"followers\": df['flcount'][i]}\n return data\n\n\n\n\"\"\"::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nFOR INFLUENTIAL POSTS TAB DATA\n::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\"\"\"\n\ndef return_infl_posts(df):\n cols = [\"name\", \"username\", \"profileimage\", \"flcount\", \"tweet\", \"rtcount\", \"fvcount\", \"type\"]\n df = df[cols].reset_index()\n del df['index']\n df = df[df['type'] != 'retweet']\n df = df.groupby(['name', 'username', 'profileimage', 'tweet'], sort=False)['rtcount', 'fvcount', 
'flcount'].max().reset_index()\n df['engagements'] = df['rtcount'] + df['fvcount']\n df = df.sort_values('engagements', ascending=False).head(10).reset_index()\n del df['index']\n data = {\"1st\": {\"name\": df['name'][0], \"username\": \"@%s\" % df['username'][0], \"profileimage\": df['profileimage'][0], \"favorites\": df['fvcount'][0], \"retweets\": df['rtcount'][0], \"tweet\": df[\"tweet\"][0]},\n \"2nd\": {\"name\": df['name'][1], \"username\": \"@%s\" % df['username'][1], \"profileimage\": df['profileimage'][1], \"favorites\": df['fvcount'][1], \"retweets\": df['rtcount'][1], \"tweet\": df[\"tweet\"][1]},\n \"3rd\": {\"name\": df['name'][2], \"username\": \"@%s\" % df['username'][2], \"profileimage\": df['profileimage'][2], \"favorites\": df['fvcount'][2], \"retweets\": df['rtcount'][2], \"tweet\": df[\"tweet\"][2]},\n \"4th\": {\"name\": df['name'][3], \"username\": \"@%s\" % df['username'][3], \"profileimage\": df['profileimage'][3], \"favorites\": df['fvcount'][3], \"retweets\": df['rtcount'][3], \"tweet\": df[\"tweet\"][3]},\n \"5th\": {\"name\": df['name'][4], \"username\": \"@%s\" % df['username'][4], \"profileimage\": df['profileimage'][4], \"favorites\": df['fvcount'][4], \"retweets\": df['rtcount'][4], \"tweet\": df[\"tweet\"][4]}}\n return data\n\n\n\n\"\"\"::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nFOR TOPICS TAB DATA\n::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\"\"\"\n\n# for wordcloud, check wordcloudscript.py\n\n# topic clustering\n# will use hierarchical document clustering\n# check lda.py\n\n\n\n\"\"\"::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nFOR POLARITY TAB DATA\n::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\"\"\"\n\nfrom SMAApp import polarize\n\ndef return_polarity_chartdata(df):\n df['polarity'] = [polarize.polarity(i) for i in df.tweet]\n data = dict(df.polarity.value_counts())\n xdata = [*data]\n ydata = list(data.values())\n ydata = [int(i) for i in ydata]\n extra_serie = {\"tooltip\": {\"y_start\": \"\", \"y_end\": \"\"}}\n chartdata = {'x': xdata, 'y1': ydata, 'name':'Volume', 'extra1': extra_serie\n }\n return chartdata\n \ndef return_polarity(df):\n df['polarity'] = [polarize.polarity(i) for i in df.tweet]\n data = dict(df.polarity.value_counts())\n return data","repo_name":"dang-git/SMA","sub_path":"SMAProject/SMAApp/engagements.py","file_name":"engagements.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35444355984","text":"def group_of_tens(nums):\n max_number = max(nums)\n index_group = max_number // 10\n\n counter_min = 0\n\n for group in range(1, index_group + 2):\n group_min = counter_min * 10 # 0, 10, 20, 30 ... with latency\n group_max = group * 10 # 10, 20, 30 ...\n temp_lst = []\n for current_number in nums:\n\n if current_number in range(group_min + 1, group_max + 1):\n temp_lst.append(current_number)\n\n if not bool(temp_lst) and group == index_group + 1:\n break\n print(f\"Group of {group}0's: {temp_lst}\")\n counter_min += 1\n\n\nnumbers_string = input().split(\", \")\nnumbers_digit = [int(number) for number in numbers_string]\ngroup_of_tens(numbers_digit)\n","repo_name":"maon0002/Programming-Fundamentals-with-Python-September-December-2022","sub_path":"lists_advanced__exercise/07_group_of_10_s.py","file_name":"07_group_of_10_s.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"70192086249","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.addProject, name='add-project'),\n path('getData/', views.getData, name='getData'),\n path('getProjectData/', views.getProjectData, name='getProjectData'),\n path('delete/', views.deleteAllData, name='deleteAllData'),\n path('management/', views.renderManagement, name='manage'),\n path('searchByForm/', views.searchByForm, name='searchByForm'),\n path('sendDataBack/', views.sendDataBack, name='sendDataBack'),\n path('modifyData/', views.modifyIronData, name='modifyIronData'),\n path('export/', views.export_users_xls, name='export'),\n]\n","repo_name":"gfjack/backupSystem","sub_path":"gongsi/backupSystem/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72271267687","text":"from collections.abc import Sequence\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nfrom changan_plugin_pytorch.qtensor import QTensor\nfrom changan_plugin_pytorch.quantization import QuantStub\nfrom torch.quantization import DeQuantStub\n\nfrom cap.models.base_modules import ConvModule2d, VargNASNetBlock\nfrom cap.registry import OBJECT_REGISTRY\n\n__all__ = [\"VargNASNet\", \"get_vargnasnet_stride2channels\"]\n\n\n@dataclass\nclass VargNASBlockConfig:\n in_channels: int\n out_channels: int\n head_op: str\n stack_ops: List[str]\n stack_ops_num: int\n stride: int\n\n\n@OBJECT_REGISTRY.register\nclass VargNASNet(nn.Module):\n \"\"\"\n Module of VargNASNet.\n\n Args:\n net_config (List[VargNASBlockConfig]): network setting.\n num_classes (int): Num classes.\n bn_kwargs (dict): Kwargs of bn layer.\n disable_quanti_input (bool): whether quanti input.\n fc_filter(int): the out_channels of the last_conv.\n include_top (bool): Whether to include output layer.\n flat_output (bool): Whether to view the output tensor.\n bias (bool): Whehter to use bias.\n input_channels (int): Input image channels, first conv input\n channels is input_channels times input_sequence_length.\n input_sequence_length (int): Input sequence length, used in\n multiple input images case.\n \"\"\"\n\n def __init__(\n self,\n net_config: List[VargNASBlockConfig],\n num_classes: int,\n bn_kwargs: dict,\n disable_quanti_input: bool = False,\n fc_filter: int = 1024,\n include_top: bool = True,\n flat_output: bool = True,\n bias: bool = False,\n input_channels: int = 3,\n input_sequence_length: int = 1,\n ):\n super(VargNASNet, self).__init__()\n self.num_classes = num_classes\n self.disable_quanti_input = disable_quanti_input\n self.fc_filter = fc_filter\n self.include_top = include_top\n self.flat_output = flat_output\n self.bias = bias\n self.bn_kwargs = bn_kwargs\n self.input_sequence_length = input_sequence_length\n\n self.quant = QuantStub()\n if input_sequence_length > 1:\n self.cat_op = nn.quantized.FloatFunctional()\n for i in range(1, input_sequence_length):\n setattr(self, f\"extra_quant_{i}\", QuantStub())\n self.dequant = DeQuantStub()\n\n self.mod1 = ConvModule2d(\n in_channels=input_channels * input_sequence_length,\n out_channels=net_config[0].in_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=bias,\n norm_layer=nn.BatchNorm2d(net_config[0].in_channels, **bn_kwargs),\n )\n stage_config = self._split_stage(net_config)\n\n self.mod2 = self._make_stage(stage_config[0])\n self.mod3 = self._make_stage(stage_config[1])\n self.mod4 = self._make_stage(stage_config[2])\n self.mod5 = self._make_stage(stage_config[3])\n\n if self.include_top:\n self.output = nn.Sequential(\n ConvModule2d(\n in_channels=net_config[-1].out_channels,\n out_channels=self.fc_filter,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n norm_layer=nn.BatchNorm2d(self.fc_filter, **bn_kwargs),\n act_layer=nn.ReLU(inplace=True),\n ),\n nn.AvgPool2d(7, stride=1),\n ConvModule2d(\n in_channels=self.fc_filter,\n out_channels=self.num_classes,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n norm_layer=nn.BatchNorm2d(self.num_classes, **bn_kwargs),\n ),\n )\n else:\n self.output = None\n\n def _split_stage(self, net_config):\n # TODO (mengao.zhao, v0.1), more flexible\n stage_config = []\n stage_config.append([net_config[0], net_config[1]])\n stage_config.append([net_config[2]])\n stage_config.append([net_config[3], 
net_config[4]])\n stage_config.append([net_config[5], net_config[6]])\n assert len(stage_config) == 4\n return stage_config\n\n def _make_stage(self, stage_config):\n layers = []\n for config_i in stage_config:\n layers.append(\n VargNASNetBlock(\n in_ch=config_i.in_channels,\n block_ch=config_i.out_channels,\n head_op=config_i.head_op,\n stack_ops=config_i.stack_ops,\n stride=config_i.stride,\n bias=self.bias,\n bn_kwargs=self.bn_kwargs,\n )\n )\n return torch.nn.Sequential(*layers)\n\n def forward(self, x):\n output = []\n if self.input_sequence_length > 1:\n assert len(x) == self.input_sequence_length\n x = self.process_sequence_input(x)\n else:\n if isinstance(x, Sequence) and len(x) == 1:\n x = x[0]\n x = x if self.disable_quanti_input else self.quant(x)\n for module in [self.mod1, self.mod2, self.mod3, self.mod4, self.mod5]:\n x = module(x)\n output.append(x)\n if not self.include_top:\n return output\n x = self.output(x)\n x = self.dequant(x)\n if self.flat_output:\n x = x.view(-1, self.num_classes)\n return x\n\n def fuse_model(self):\n self.mod1.fuse_model()\n modules = [self.mod2, self.mod3, self.mod4, self.mod5]\n if self.include_top:\n modules += [self.output]\n for module in modules:\n for m in module:\n if hasattr(m, \"fuse_model\"):\n m.fuse_model()\n\n def set_qconfig(self):\n from cap.utils import qconfig_manager\n\n if self.include_top:\n # disable output quantization for last quanti layer.\n getattr(\n self.output, \"2\"\n ).qconfig = qconfig_manager.get_default_qat_out_qconfig()\n\n def process_sequence_input(self, x: List) -> Union[torch.Tensor, QTensor]:\n \"\"\"Process sequence input with cap.\"\"\"\n assert self.input_sequence_length > 1\n assert isinstance(x, Sequence)\n x_list = []\n for i in range(0, self.input_sequence_length):\n if i == 0:\n quant = self.quant\n else:\n quant = getattr(self, f\"extra_quant_{i}\")\n x_list.append(x[i] if self.disable_quanti_input else quant(x[i]))\n return self.cat_op.cap(x_list, dim=1)\n\n\ndef get_vargnasnet_stride2channels(\n net_config: List[VargNASBlockConfig],\n strides: Optional[List[int]] = None,\n) -> Dict:\n \"\"\"\n Get vargnasnet stride to channel dict with giving channels and strides.\n\n Args:\n net_config: network setting\n strides: stride list corresponding to channels.\n\n Returns\n strides2channels: a stride to channel dict.\n \"\"\"\n if strides is None:\n strides = [2, 4, 8, 16, 32, 64, 128, 256]\n\n net_stride_list = [config_i.stride for config_i in net_config]\n channels = []\n\n if net_stride_list[0] == 2:\n channels.append(net_config[0].in_channels)\n for i in range(len(net_stride_list) - 1):\n if net_stride_list[i + 1] == 2:\n channels.append(net_config[i].out_channels)\n channels.append(net_config[-1].out_channels)\n channels = channels + [net_config[-1].out_channels] * (\n len(strides) - len(channels)\n )\n\n strides2channels = {}\n for s, c in zip(strides, channels):\n strides2channels[s] = c\n return strides2channels\n","repo_name":"xingyun-xy/cap","sub_path":"cap/models/backbones/vargnasnet.py","file_name":"vargnasnet.py","file_ext":"py","file_size_in_byte":7888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
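A minimal usage sketch for get_vargnasnet_stride2channels from the vargnasnet.py record above, assuming the cap package and its torch dependencies are importable; the channel counts and the "varg_block" op name below are illustrative placeholders rather than values taken from the record.

from cap.models.backbones.vargnasnet import (
    VargNASBlockConfig,
    get_vargnasnet_stride2channels,
)

# Two hypothetical stride-2 blocks; only in/out channels and stride matter here.
net_config = [
    VargNASBlockConfig(in_channels=32, out_channels=32, head_op="varg_block",
                       stack_ops=[], stack_ops_num=0, stride=2),
    VargNASBlockConfig(in_channels=32, out_channels=64, head_op="varg_block",
                       stack_ops=["varg_block"], stack_ops_num=1, stride=2),
]

# Maps each stride in the default list [2, 4, 8, ..., 256] to a channel count;
# trailing strides reuse the last block's out_channels.
print(get_vargnasnet_stride2channels(net_config))
# {2: 32, 4: 32, 8: 64, 16: 64, 32: 64, 64: 64, 128: 64, 256: 64}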
+{"seq_id":"16822699107","text":"import tensorflow as tf\n\n\n# 创建TFRecord文件的帮助函数\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\n# 1. 生成文件存储样例数据。\ndef produce():\n # 总计写入多少个文件\n num_shards = 2\n # instances_per_shard定义了每个文件中有多少个数据\n instances_per_shard = 2\n\n for i in range(num_shards):\n filename = ('data.tfrecords-%.5d-of-%.5d' % (i, num_shards))\n writer = tf.python_io.TFRecordWriter(filename)\n # 将数据封装成Example结构并写入TFRecord文件\n for j in range(instances_per_shard):\n # Example 结构仅包含当前样例属于第几个文件以及是当前文件的第几个样本\n example = tf.train.Example(features=tf.train.Features(feature={\n 'i': _int64_feature(i),\n 'j': _int64_feature(j)}))\n writer.write(example.SerializeToString())\n writer.close()\n\n\n# 2. 读取文件。\ndef readData():\n # 正则获取文件列表\n files = tf.train.match_filenames_once(\"data.tfrecords-*\")\n # 输入队列\n filename_queue = tf.train.string_input_producer(files, shuffle=False)\n\n # 如图7.1节中所示,读取并解析一个样本\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'i': tf.FixedLenFeature([], tf.int64),\n 'j': tf.FixedLenFeature([], tf.int64),\n })\n\n with tf.Session() as sess:\n tf.local_variables_initializer().run()\n print(sess.run(files))\n\n # 声明tf.train.Coordinator类来协同不同线程,并启动线程\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # 多次执行获取数据的操作\n for i in range(6):\n print(sess.run([features['i'], features['j']]))\n coord.request_stop()\n coord.join()\n\n\nif __name__ == '__main__':\n # produce()\n readData()\n","repo_name":"taoshiqian/ImageProcessTensorflow","sub_path":"DataProcess/tf4_input_tfrecords.py","file_name":"tf4_input_tfrecords.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"37031789481","text":"import os\nimport argparse\nimport logging\nimport json\n\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SequentialSampler\nfrom transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM\n\nfrom src.utils.utils import build_compute_metrics_fn_gpt2, remove_v_head, add_special_tokens_\nfrom src.data_utils.canard import load_canard\nfrom src.data_utils.qrecc import load_qrecc\nfrom src.data_utils.qr_data_utils import Seq2SeqDataCollator, DecoderOnlyCollator\nfrom src.data_utils.qa_rewrite import load_qa_datasets\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\", datefmt=\"%m/%d/%Y %H:%M:%S\", level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n\ndef Inference(args):\n os.makedirs(f\"{args.save_path}/{args.exp}\", exist_ok=True)\n\n # Initialize the model and tokenizer\n model_name_or_path = os.path.join(args.model_folder, args.exp) + \"/checkpoint-\" + args.checkpoint if args.checkpoint is not None else os.path.join(args.model_folder, args.exp)\n if args.checkpoint is None:\n args.checkpoint = \"best\"\n\n config = AutoConfig.from_pretrained(model_name_or_path) \n try:\n tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)\n reload_special_tokens = False\n except:\n tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model)\n reload_special_tokens = True\n if \"gpt2\" in args.pretrained_model:\n model, loading_info = AutoModelForCausalLM.from_pretrained(model_name_or_path, config=config, output_loading_info=True)\n loading_info[\"unexpected_keys\"] = remove_v_head(loading_info[\"unexpected_keys\"])\n assert len(loading_info[\"missing_keys\"]) == len(loading_info[\"unexpected_keys\"])\n else:\n model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path, config=config, output_loading_info=True)\n assert len(loading_info[\"missing_keys\"]) == len(loading_info[\"unexpected_keys\"])\n\n assert len(loading_info[\"missing_keys\"]) == len(loading_info[\"unexpected_keys\"])\n assert len(loading_info[\"missing_keys\"]) == 0\n assert tokenizer.pad_token_id is not None\n\n if reload_special_tokens:\n add_special_tokens_(model, tokenizer)\n model.config.pad_token_id = tokenizer.pad_token_id\n\n # Load ckpt\n if args.ckpt != '':\n logger.info(\"Load the fine-tuned model...\")\n model.load_state_dict(torch.load(args.ckpt),strict=False)\n model.to(args.device)\n\n # get dataloaders\n args.inference = True\n model_type = \"decoder_only\" if \"gpt2\" in args.pretrained_model else \"seq2seq\"\n if args.dataset == 'canard':\n lm_datasets = load_canard(args, tokenizer, overwrite_cache=args.overwrite_cache, model_type=model_type)\n elif args.dataset == 'qrecc':\n lm_datasets = load_qrecc(args, tokenizer, overwrite_cache=args.overwrite_cache, model_type=model_type)\n elif args.dataset == 'coqa' or args.dataset == 'quac':\n lm_datasets = load_qa_datasets(args, tokenizer, data_dir=args.data_dir, output_dir=args.save_path, overwrite_cache=args.overwrite_cache, model_type=model_type)\n else:\n raise ValueError(\"Invalid dataset!\")\n\n if tokenizer.sep_token is None:\n stop_token = tokenizer.eos_token\n else:\n stop_token = tokenizer.sep_token\n \n print(f\"The stop token is {stop_token}\")\n \n if os.path.exists(f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_generation.txt') and \\\n 
os.path.exists(f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_gold.txt') and not args.overwrite and not args.debug:\n print(\"The result already exists! Skip inference!\")\n print(\"Evaluation starts!\")\n Evaluate(args)\n exit()\n \n loader = DataLoader(\n lm_datasets[args.split], \n batch_size=args.eval_bsz, \n sampler=SequentialSampler(lm_datasets[args.split]),\n collate_fn=DecoderOnlyCollator(tokenizer.pad_token_id) if (model_type == \"decoder_only\" and args.batchify) else Seq2SeqDataCollator(tokenizer.pad_token_id),\n shuffle=False\n )\n\n generated_sequences = []\n golden_sequences = []\n for batch in tqdm(loader, desc=f'Inference', total=len(loader), ncols=100):\n input_gen_len = batch['input_ids'].shape[1] if model_type == \"decoder_only\" else 0\n input_ids, attention_mask = batch[\"input_ids\"], batch[\"attention_mask\"]\n \n gen_kwargs = {\n \"top_k\": args.k,\n \"top_p\": args.p,\n \"do_sample\": args.sampling,\n \"pad_token_id\": tokenizer.pad_token_id,\n \"num_beams\": 5, \n \"temperature\": args.temperature,\n \"max_length\": args.length + input_gen_len, \n \"min_length\": 5,\n \"repetition_penalty\": args.repetition_penalty,\n }\n\n if \"token_type_ids\" in batch:\n token_type_ids = batch[\"token_type_ids\"]\n gen_kwargs.update({\"token_type_ids\": token_type_ids.to(args.device)}) \n \n if model_type == \"seq2seq\":\n gen_kwargs.update({\"decoder_start_token_id\": tokenizer.bos_token_id})\n \n generated_sequence = model.generate(\n input_ids=input_ids.to(args.device),\n attention_mask=attention_mask.to(args.device),\n **gen_kwargs,\n )\n \n for generated_sequence, response in zip(generated_sequence[:, input_gen_len:], batch[\"labels\"]):\n if args.debug:\n logger.info(f\"The shape of the output sequences {len(generated_sequence)}.\")\n\n # Decode text\n text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True, skip_special_tokens=True) # DO NOT skip_special_tokens\n if not args.debug:\n # Remove all text after the stop token\n text = text[: text.find(stop_token) if stop_token and text.find(stop_token)>0 else None]\n generated_sequences.append(text)\n \n response = response[response != -100]\n response_text = tokenizer.decode(response, clean_up_tokenization_spaces=True, skip_special_tokens=True)\n golden_sequences.append(response_text)\n \n if args.debug:\n print(f\"The generated sentence is: {text}\")\n # print(f\"The golden sentence is: {response_text}\")\n print(\"=\"*80)\n input()\n\n if not args.debug:\n with open(f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_generation.txt', \"w\") as f:\n for line in generated_sequences:\n f.write(line.replace(\"\\n\", \" \")+\"\\n\")\n with open(f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_gold.txt', \"w\") as f:\n for line in golden_sequences:\n f.write(line.replace(\"\\n\", \" \")+\"\\n\")\n\ndef Evaluate(args):\n if os.path.exists(f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_generation.txt') and os.path.exists(f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_gold.txt'):\n print(f\"Evaluation generation {args.save_path}/{args.exp}.\")\n\n metric_fn = build_compute_metrics_fn_gpt2(\"rouge1_recall\" if args.dataset == \"qrecc\" else \"bleu\")\n\n preds = []\n with open(f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_generation.txt', \"r\") as f:\n lines = f.readlines()\n for line in lines:\n preds.append(line.strip())\n \n golds = []\n with open(f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_gold.txt', \"r\") as 
f:\n lines = f.readlines()\n for line in lines:\n golds.append(line.strip())\n \n results = metric_fn(preds, golds) if args.dataset == \"qrecc\" else metric_fn(f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_generation.txt', f'{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_gold.txt')\n\n print(results)\n with open(f\"{args.save_path}/{args.exp}/{args.split}_{args.checkpoint}_result.json\", \"w\") as f:\n json.dump(results, f)\n else:\n raise ValueError(\"Please do inference first!\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # model settings\n parser.add_argument(\n \"--model_folder\",\n default=\"./save\",\n type=str,\n )\n parser.add_argument(\n \"--ckpt\",\n default=\"\",\n type=str,\n )\n parser.add_argument('--exp', type=str, default=\"gpt2-canard\")\n parser.add_argument('--checkpoint', type=str, default=None)\n parser.add_argument(\"--bsz\", type=int, default=2)\n parser.add_argument(\"--eval_bsz\", type=int, default=2)\n # generation settings\n parser.add_argument(\"--length\", type=int, default=200)\n parser.add_argument(\n \"--temperature\",\n type=float,\n default=1.0,\n help=\"temperature of 1.0 has no effect, lower tend toward greedy sampling\",\n )\n parser.add_argument(\n \"--repetition_penalty\", type=float, default=1.0, help=\"primarily useful for CTRL model; in that case, use 1.2\"\n )\n parser.add_argument(\"--sampling\", action=\"store_true\")\n parser.add_argument(\"--k\", type=int, default=0)\n parser.add_argument(\"--p\", type=float, default=0.9)\n\n # data settings\n parser.add_argument('--dataset', type=str, default=\"canard\")\n parser.add_argument('--data_dir', type=str, default=\"./data/canard\")\n parser.add_argument('--save_path', type=str, default=\"./save\")\n parser.add_argument('--pretrained_model', type=str, default=\"gpt2\") \n parser.add_argument('--max_seq_length', type=int, default=256) # 256 if --history_in_context\n parser.add_argument('--history_len', type=int, default=3) # history length\n parser.add_argument('--split', type=str, default=\"test\")\n\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\"--num_return_sequences\", type=int, default=1, help=\"The number of samples to generate.\")\n parser.add_argument('-cu', '--cuda', help='Cude device number', type=str, required=False, default='5')\n parser.add_argument('--gold_file', help='gold generation file path', type=str, required=False, default='data/canard/test-gold.txt')\n # parser.add_argument(\"--add_special_tokens\", action=\"store_true\", help=\"Whether to add special tokens in the input sequence.\")\n\n # for debug\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"Enter DEBUG mode\")\n parser.add_argument(\"--overwrite\", help=\"Overwrite the inference results even though it exists already\", type=bool, default=False)\n\n parser.add_argument(\n \"--preprocessing_num_workers\",\n default=None,\n type=int,\n help=\"The number of processes to use for the preprocessing.\",\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\n \"--batchify\", action=\"store_true\", help=\"Prepare the dataset in batch mode.\"\n )\n args = parser.parse_args()\n\n\n # args.device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n 
os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.cuda\n args.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n Inference(args)","repo_name":"HLTCHKUST/cqr4cqa","sub_path":"infer_qrewrite.py","file_name":"infer_qrewrite.py","file_ext":"py","file_size_in_byte":11634,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
+{"seq_id":"6700121512","text":"import requests\nimport json\nimport sys\nimport csv\nimport re\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nlem = WordNetLemmatizer()\n\ndef ocr_space_file(filename, overlay=False, api_key='K83280661788957', language='eng'):\n payload = {'isOverlayRequired': overlay,\n 'apikey': api_key,\n 'language': language,\n }\n with open(filename, 'rb') as f:\n r = requests.post('https://api.ocr.space/parse/image',\n files={filename: f},\n data=payload,\n )\n return r.content.decode()\n\ndef find_matching_sentences(filename):\n # Input File:\n test_file = ocr_space_file(filename=sys.argv[1], language='eng')\n data = test_file\n parsed_data = json.loads(data)\n # Extracting the ParsedText\n parsed_text = parsed_data[\"ParsedResults\"][0][\"ParsedText\"]\n words_to_remove = [\"dr\",\"do\",\"is\",\"that\",\"mg\",\"for\",\"on\",\"pm\",\"am\",\"tablet\",\"and\",\"no\",\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"/\",\"with\",\"or\",\"(\",\")\",\".\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\",\"acid\"]\n medicine_names = [line.strip() for line in parsed_text.split('\\n')]\n words = \"\".join(medicine_names)\n cleaned_text = re.sub(r'\\d+(\\.\\d+)?', '', words)\n cleaned_text = re.sub(r'[\\[\\]{}()%.]', '', cleaned_text)\n words = \"\".join(cleaned_text)\n main_words = words.split()\n filtered_words = [word for word in main_words if word.lower() not in words_to_remove]\n # Joining the filtered words back into a string\n filtered_words = [lem.lemmatize(word) for word in filtered_words]\n result_string = \" \".join(filtered_words)\n mylist = []\n flag = False\n with open(\"medicine_data.csv\", mode='r') as file:\n csv_reader = csv.DictReader(file)\n for row in csv_reader:\n full_sentence = row[\"Med_Name\"]\n for word in result_string.split():\n for i in full_sentence.split():\n if ((word.isalpha() != 0) and (word.isdigit() != 1) and word.lower() == i.lower() and (word.lower() != 'tablet' and word.lower() != 'mg') and word.lower() not in words_to_remove):\n mylist.append(full_sentence)\n flag = True\n break\n myset = {i for i in mylist}\n return myset\n\nmatched_sentences = find_matching_sentences(filename=sys.argv[1]) #List of all Medicine names\nprint(matched_sentences)\n\n\n\n\n\n","repo_name":"pojha1401/minor_project","sub_path":"OCR.py","file_name":"OCR.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5186679045","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 6 02:10:52 2017\n\n@author: Shashwat Pathak\n\"\"\"\n\nimport os\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\npath = os.getcwd()\n\ntext = open(path+'/data/raw/obamaspeech.txt').read()\n\nwordcloud = WordCloud().generate(text)\n\nwordcloud = WordCloud(max_font_size=40).generate(text)\nplt.figure()\nplt.imshow(wordcloud, interpolation=\"bilinear\")\nplt.axis(\"off\")\nplt.savefig(path+'/reports/figures/obamaspeech.png')\n","repo_name":"iceman121/speeches","sub_path":"src/visualization/visualize_wordcloud.py","file_name":"visualize_wordcloud.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34981449963","text":"class Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n \n index = 0\n size = len(nums)\n if size==1:\n return nums[0]\n \n max_sum = current_sum = nums[0]\n \n for element in nums[1:]:\n \n # if the current_sum is negative, then we start over\n # making the current_sum equal to the positive number\n \"\"\"\n 1 -2 2\n 1st iteration: currentsum = 1\n 2nd iteration: currentsum = -2, 1 -2 = -1\n 3rd iteration: currentsum = 2,-1,= 2\n \"\"\"\n current_sum = max(element, current_sum + element)\n max_sum = max(max_sum, current_sum)\n \n return max_sum\n \n ","repo_name":"suman2020/LeetCode","sub_path":"maximum-subarray/maximum-subarray.py","file_name":"maximum-subarray.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34934587425","text":"from typing import List, Optional, Tuple\n\nimport magicdict\n\nfrom .. import constants, initials\n\nBODY_IS_CHUNKED = -1\nBODY_IS_ENDLESS = -2\n\n\nclass UnparsableHttpMessage(ValueError):\n pass\n\n\nclass InvalidHeader(UnparsableHttpMessage):\n pass\n\n\nclass InvalidTransferEncoding(InvalidHeader):\n pass\n\n\nclass InvalidContentLength(InvalidHeader):\n pass\n\n\nclass InvalidChunkLength(InvalidHeader):\n pass\n\n\ndef is_chunked_body(te_header_bytes: str) -> bool:\n te_header_pieces = [\n i.strip().lower() for i in te_header_bytes.split(\";\") if i\n ]\n\n last_piece = te_header_pieces.pop(-1)\n\n if (\n last_piece == \"identity\" and te_header_pieces\n ) or \"identity\" in te_header_pieces:\n raise InvalidTransferEncoding(\n \"Identity is not the only transfer encoding.\"\n )\n\n if \"chunked\" in te_header_pieces:\n raise InvalidTransferEncoding(\n \"Chunked transfer encoding found, but not at last.\"\n )\n\n return last_piece == \"chunked\"\n\n\ndef _parse_content_length_header(cl_header_str: str) -> int:\n try:\n return int(cl_header_str, 10)\n\n except ValueError as e:\n raise InvalidContentLength(\n \"The value of Content-Length is not valid.\"\n ) from e\n\n\ndef _split_initial_lines(buf: bytearray) -> Optional[List[str]]:\n pos = buf.find(b\"\\r\\n\\r\\n\")\n\n if pos == -1:\n return None\n\n initial_buf = buf[:pos]\n del buf[: pos + 4]\n\n return initial_buf.decode(\"latin-1\").split(\"\\r\\n\")\n\n\ndef _parse_headers(\n header_lines: List[str],\n) -> magicdict.FrozenTolerantMagicDict[str, str]:\n headers: List[Tuple[str, str]] = []\n\n try:\n for line in header_lines:\n name, value = line.split(\":\", 1)\n\n headers.append((name.strip(), value.strip()))\n\n except ValueError as e:\n raise InvalidHeader(\"Unable to unpack the current header.\") from e\n\n return magicdict.FrozenTolerantMagicDict(headers)\n\n\ndef parse_request_initial(\n buf: bytearray,\n) -> Optional[initials.HttpRequestInitial]:\n initial_lines = _split_initial_lines(buf)\n\n if initial_lines is None:\n return None\n\n try:\n method_buf, path_buf, version_buf = initial_lines.pop(0).split(\" \")\n\n headers = _parse_headers(initial_lines)\n\n return initials.HttpRequestInitial(\n constants.HttpRequestMethod(method_buf.upper().strip()),\n version=constants.HttpVersion(version_buf.upper().strip()),\n uri=path_buf,\n authority=headers.get(\"host\", None),\n scheme=headers.get_first(\"x-scheme\", None),\n headers=headers,\n )\n\n except InvalidHeader:\n raise\n\n except (IndexError, ValueError) as e:\n raise UnparsableHttpMessage(\n \"Unable to unpack the first line of the initial.\"\n ) from e\n\n\ndef parse_response_initial(\n buf: bytearray, req_initial: initials.HttpRequestInitial\n) -> Optional[initials.HttpResponseInitial]:\n initial_lines = _split_initial_lines(buf)\n\n if initial_lines is None:\n return None\n\n try:\n version_buf, status_code_buf, *status_text = initial_lines.pop(\n 0\n ).split(\" \")\n\n status_code = constants.HttpStatusCode(int(status_code_buf, 10))\n\n if status_code == constants.HttpStatusCode.CONTINUE:\n # Trim off 100 continue\n return parse_response_initial(buf, req_initial)\n\n return initials.HttpResponseInitial(\n status_code,\n version=constants.HttpVersion(version_buf),\n headers=_parse_headers(initial_lines),\n )\n\n except InvalidHeader:\n raise\n\n except (IndexError, ValueError) as e:\n raise UnparsableHttpMessage(\n \"Unable to unpack the first line of the initial.\"\n ) from e\n\n\ndef discover_request_body_length(initial: 
initials.HttpRequestInitial) -> int:\n if \"upgrade\" in initial.headers:\n return BODY_IS_ENDLESS\n\n if \"transfer-encoding\" in initial.headers and is_chunked_body(\n initial.headers[\"transfer-encoding\"]\n ):\n return BODY_IS_CHUNKED\n\n if \"content-length\" in initial.headers:\n return _parse_content_length_header(initial.headers[\"content-length\"])\n\n return 0\n\n\ndef discover_response_body_length(\n initial: initials.HttpResponseInitial,\n *,\n req_initial: initials.HttpRequestInitial,\n) -> int:\n if (\n initial.status_code == constants.HttpStatusCode.SWITCHING_PROTOCOLS\n or req_initial.method == constants.HttpRequestMethod.CONNECT\n ):\n return BODY_IS_ENDLESS\n\n # HEAD Requests and 204/304 Responses have no body.\n if (\n req_initial.method == constants.HttpRequestMethod.HEAD\n or initial.status_code\n in (\n constants.HttpStatusCode.NO_CONTENT,\n constants.HttpStatusCode.NOT_MODIFIED,\n )\n ):\n return 0\n\n if \"transfer-encoding\" in initial.headers and is_chunked_body(\n initial.headers[\"transfer-encoding\"]\n ):\n return BODY_IS_CHUNKED\n\n if \"content-length\" not in initial.headers:\n # Read until close.\n return BODY_IS_ENDLESS\n\n return _parse_content_length_header(initial.headers[\"content-length\"])\n\n\ndef parse_chunk_length(buf: bytearray) -> Optional[int]:\n pos = buf.find(b\"\\r\\n\")\n\n if pos == -1:\n return None\n\n len_buf = buf[:pos]\n del buf[: pos + 2]\n\n len_buf = len_buf.split(b\";\", 1)[0].strip()\n\n try:\n return int(len_buf, 16)\n\n except ValueError as e:\n raise InvalidChunkLength(\"Failed to decode Chunk Length\") from e\n","repo_name":"futursolo/magichttp","sub_path":"magichttp/h1impl/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
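A small usage sketch for parse_chunk_length from the parsers.py record above; the import path is inferred from the record's sub_path and assumes the magichttp package is installed. The helper consumes the chunk-length line from the buffer in place.

from magichttp.h1impl.parsers import parse_chunk_length

buf = bytearray(b"1a;some-ext\r\n")  # hex length "1a" followed by a chunk extension
print(parse_chunk_length(buf))       # 26
print(buf)                           # bytearray(b'') -- the length line was consumed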
+{"seq_id":"7563428549","text":"import numpy as np\n\"\"\"\nthis file will create a GM(grey prediction machine), and enable user to predict the future data based on current array of data. please note that this \nmachine is based on first order differential equation, so it will be good at predicting data with certain trend( growing or minimizing), not very useful\nfor the periodically changed data( or you can try to break the periodically changed data into parts and use this machine). good for short term future prediction but \nnot the long term\n\"\"\"\nclass GM_predictor(object):\n def __init__(self,existing_data):\n '''\n this constructor will help initialize a GM_predictor, only need to add the existing_data and then we will store the data for you\n :param existing_data:\n the series data we already have, will try to find the patterns of how the data change\n '''\n self.existingD = existing_data\n\n def GM11_predict(self, predictNum):\n '''\n this function will enable the GM_predictor to predict future data based on the existing data with the parameter usedDataNum and predictNum\n :param usedDataNum:\n the number of data to use in self.existingD, could change according to will, has to be greater or equal to 1\n :param predictNum:\n the number of data to predict later, predictNum is the length of the predicted data\n :return: result\n an list of data predicted by GM_predictor\n '''\n #first assuming using all data to do prediction, then make modifications\n oldData_x0 = np.array( self.existingD ) # getting all existing data\n dataSum_x1 = np.cumsum( oldData_x0 ) #summing up the prevoius value, culminate\n halfSum_z1 = (dataSum_x1[ :len(dataSum_x1)-1 ]+ dataSum_x1[ 1: ])/2.0 #shifting is faster than looping\n halfSum_z1 = halfSum_z1.reshape(len(halfSum_z1), 1) # change halfSum_z1 into array, get ride of the first term because it is none\n coef_b1 = np.append(-halfSum_z1,np.ones_like( halfSum_z1 ), axis= 1) #according to the fomula, setting coef_b1\n pseudoR_y = oldData_x0[1:].reshape( len(oldData_x0)-1, 1 ) #have to reshape the pseudoR_y into the same shape as coef_b1\n #getting the value of a,b according to the formula\n [ [a],[b] ] = np.dot(np.dot(np.linalg.inv( np.dot(coef_b1.T, coef_b1) ), coef_b1.T), pseudoR_y)\n result= []\n #use the formula, get oldData_x0's every later terms from dataSum_x1's solutions\n for i in range(oldData_x0.shape[0]+1,oldData_x0.shape[0]+predictNum+1): # index will begin in the end of the existing list and end in the len(existing list)+predictNum\n result.append((oldData_x0[0]-b/a)*np.exp( -a*(i-1) )-( oldData_x0[0]-b/a )*np.exp(-a*( i-2 )))\n return result\n\n","repo_name":"SimonSaid1996/curve_fittingAndGM_predictor","sub_path":"GM_predictor.py","file_name":"GM_predictor.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33081285136","text":"import sys\nimport stdio\nimport time\n\na = b = c = d = 1\n#e = [n**5: n in range(250)]\ne = 150\n\nstart = time.clock()\nwhile a <= e:\n while b <= e:\n while c <= e:\n while d <= e:\n if a**5 + b**5 +c**5 +d**5 == 144**5:\n print(a, b, c, d, e)\n #elapsed = (time.time() - start)\n #print(elapsed)\n d +=1\n c +=1\n d = c\n b += 1\n c = b\n a += 1\n b = a\n\n#print('finish' + str(time.time() - start))\n \n","repo_name":"JaydenYL/Practice","sub_path":"introcs-1.0/1.3.43.py","file_name":"1.3.43.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"14510827098","text":"import codecs\nimport csv\n\nfrom flask import request, make_response\nfrom flask_jsonpify import jsonify\n\nfrom apps.db.models import db, Upload, Point, Link\nfrom apps.services.tasks import celery\nfrom apps.services.upload import UploadGeodataService\n\nfrom app import app, auth\n\n\n@app.route('/api/calculateDistances', methods=['POST'])\n@auth.login_required\ndef calculate_distances():\n file = request.files['file']\n if not file:\n return 'Upload a CSV file'\n data = []\n stream = codecs.iterdecode(file.stream, 'utf-8')\n for row in csv.DictReader(stream):\n if row:\n data.append(row)\n\n service = UploadGeodataService(data)\n id = service.process()\n output = {'task_id': id,\n 'status': service.status}\n\n return make_response(jsonify(output), 200)\n\n\n@app.route('/api/getResult/', methods=['GET'])\n@auth.login_required\ndef get_result(result_id):\n qs = Upload.query.filter_by(task_id=result_id)\n if qs.first() is None:\n message = f'Task {result_id} not found.'\n return make_response(jsonify({'message': message}), 404)\n\n upload = qs.first()\n status = upload.status\n\n if status != Upload.DONE:\n # obtain real status from task\n status = celery.AsyncResult(result_id).status\n data = []\n else:\n qs_points = Point.query.filter_by(upload_id=upload.id)\n p_ids = [p.id for p in qs_points]\n data = {'points': [{'name': p.name, 'address': p.address}\n for p in qs_points],\n 'links': [{'name': l.name, 'distance': l.distance}\n for l in db.session.query(Link).filter((Link.point_a_id in p_ids) | (Link.point_b_id in p_ids))]}\n return make_response(jsonify({'task_id': result_id,\n 'status': status,\n 'data': data}), 200)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"VitaDorosheva/dokka_test_vd","sub_path":"apps/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72198724648","text":"from common_import import *\n\n\n@benchmark_registry.register(\"one_hot\")\nclass OneHotConfig(APIConfig):\n def __init__(self):\n super(OneHotConfig, self).__init__('one_hot')\n\n def init_from_json(self, filename, config_id=0, unknown_dim=16):\n super(OneHotConfig, self).init_from_json(filename, config_id,\n unknown_dim)\n self.feed_spec = {\"range\": [0, self.num_classes]}\n\n\n@benchmark_registry.register(\"one_hot\")\nclass PaddleOneHot(PaddleOpBenchmarkBase):\n def build_graph(self, config):\n x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)\n result = paddle.nn.functional.one_hot(\n x=x, num_classes=config.num_classes)\n\n self.feed_list = [x]\n self.fetch_list = [result]\n\n\n@benchmark_registry.register(\"one_hot\")\nclass TorchOneHot(PytorchOpBenchmarkBase):\n def build_graph(self, config):\n x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)\n result = torch.nn.functional.one_hot(\n input=x, num_classes=config.num_classes)\n\n self.feed_list = [x]\n self.fetch_list = [result]\n\n\n@benchmark_registry.register(\"one_hot\")\nclass TFOneHot(TensorflowOpBenchmarkBase):\n def build_graph(self, config):\n data = self.variable(\n name='data', shape=config.x_shape, dtype=config.x_dtype)\n result = tf.one_hot(\n indices=data,\n depth=config.num_classes,\n on_value=None,\n off_value=None,\n axis=None,\n dtype=None)\n\n self.feed_list = [data]\n self.fetch_list = [result]\n","repo_name":"PaddlePaddle/benchmark","sub_path":"api/tests/one_hot.py","file_name":"one_hot.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"53"}
+{"seq_id":"9947258234","text":"from tkinter import *\r\n\r\nvent= Tk()\r\nvent.title(\"Ejemplo de Campos y Etiquetas\")\r\nvent.geometry(\"400x200\")\r\nvent.iconbitmap(\"proyecto/favicon.ico\")\r\n\r\n# Etiqueta 1\r\nlbl1 = Label(vent, text=\"Nombre:\")\r\nlbl1.grid(row=0, column=0, padx=10, pady=10, sticky=\"w\")\r\n\r\n# Campo de entrada 1\r\ntxt1 = Entry(vent)\r\ntxt1.grid(row=0, column=1, padx=10, pady=10)\r\n\r\n# Etiqueta 2\r\nlbl2 = Label(vent, text=\"Apellido:\")\r\nlbl2.grid(row=1, column=0, padx=10, pady=10, sticky=\"w\")\r\n\r\n# Campo de entrada 2\r\ntxt2 = Entry(vent)\r\ntxt2.grid(row=1, column=1, padx=10, pady=10)\r\n\r\nvent.mainloop()\r\n","repo_name":"jamesmosqr/TKinter","sub_path":"Proyecto/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20612241951","text":"from keras import backend as K\nfrom keras.callbacks import Callback\nimport numpy as np\nimport tensorflow as tf\n\n\nclass AutoClassWeighting(Callback):\n \"\"\"\n Automatically update the classes weighting for anchor-based detection algorithms.\n\n Currently, only merged with YOLOV4.\n Detailed demonstration could be found at:\n https://confluence.nvidia.com/display/~tylerz/Auto+class+weighting+-\n -+trainning+anchor-based+object+detection+model+on+unbalanced+dataset\n \"\"\"\n\n def __init__(self, train_dataset, loss_ops, alpha=0.9, interval=10):\n \"\"\"Init function.\"\"\"\n super(AutoClassWeighting, self).__init__()\n self.train_dataset = train_dataset\n self.classes = train_dataset.classes\n self.loss_ops = loss_ops\n self.alpha = alpha\n self.pred = tf.Variable(0., validate_shape=False,\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n self.label = tf.Variable(0., validate_shape=False,\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n self.loss_per_class = np.zeros((len(self.classes)), dtype=np.float32)\n self.anchor_per_class = np.zeros((len(self.classes)), dtype=np.float32)\n self.interval = interval\n\n def on_batch_end(self, batch, logs=None):\n \"\"\"Compute per anchor loss in every batch.\"\"\"\n # compute the per class loc_loss cls_loss (average by num of assigned anchors)\n # input_data\n pred = K.get_value(self.pred)\n encoded_lab = K.get_value(self.label)\n # y_pred_encoded = self.model.predict(batch_x)\n batch_loss = K.get_session().run(self.loss_ops[2],\n feed_dict={self.loss_ops[0]: encoded_lab,\n self.loss_ops[1]: pred})\n # loc_loss: [#batch, #anchor]; cls_loss: [#batch, #anchor]\n loc_loss, cls_loss = batch_loss\n # convert the one-hot vector to index\n idx_map = np.tile(np.arange(len(self.classes)),\n [loc_loss.shape[0], loc_loss.shape[1], 1])\n one_hot_vectors = encoded_lab[:, :, 6:-1]\n neg_map = np.full(one_hot_vectors.shape, -1)\n cls_idx = np.max(np.where(one_hot_vectors == 1, idx_map, neg_map), axis=-1)\n # compute the loss per class\n for idx in range(len(self.classes)):\n cur_loc_loss = float(0)\n cur_cls_loss = float(0)\n cur_loc_loss = loc_loss[cls_idx == idx]\n if len(cur_loc_loss) <= 0:\n continue\n num_anchor = cur_loc_loss.shape[0]\n cur_loc_loss = np.sum(cur_loc_loss)\n cur_cls_loss = np.sum(cls_loss[cls_idx == idx])\n self.loss_per_class[idx] += cur_loc_loss + cur_cls_loss\n self.anchor_per_class[idx] += num_anchor\n\n def on_epoch_end(self, epoch, logs=None):\n \"\"\"Compute per anchor per class loss and classes weighting at the end of epoch.\"\"\"\n # compute the per class weights (reciprocal of each loss * maximum loss)\n old_weights = np.array(self.train_dataset.encode_fn.class_weights)\n # print(f\"old_weights: {old_weights}\")\n self.loss_per_class = self.loss_per_class / old_weights\n self.loss_per_class = self.loss_per_class / self.anchor_per_class\n # max_loss = np.max(self.loss_per_class)\n min_loss = np.min(self.loss_per_class)\n new_wts = []\n for idx in range(len(self.classes)):\n new_w = float(self.loss_per_class[idx]) / min_loss\n # print(f\"{self.classes[idx]}:{self.loss_per_class[idx]}\")\n # print(f\"{self.classes[idx]}_anchor:{self.anchor_per_class[idx]}\")\n # logs[f\"{self.classes[idx]}_loss\"] = self.loss_per_class[idx]\n # logs[f\"{self.classes[idx]}_anchor\"] = self.anchor_per_class[idx]\n new_wts.append(new_w)\n # Make the first epoch count !\n if epoch != 0:\n new_wts = old_weights * self.alpha + np.array(new_wts) * (1 - self.alpha)\n # print(f\"new_weights: {new_wts}\")\n if epoch % 
self.interval == 0:\n self.train_dataset.encode_fn.update_class_weights(new_wts)\n self.loss_per_class = np.zeros((len(self.classes)), dtype=np.float32)\n self.anchor_per_class = np.zeros((len(self.classes)), dtype=np.float32)\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/cv/common/callbacks/auto_class_weighting_callback.py","file_name":"auto_class_weighting_callback.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"26166174662","text":"import RPi.GPIO as GPIO\nimport time\nimport numpy as np\nimport global_test_var as GV\n# from HDM import *\n##from Sql_db import *\nfrom CAN_Bus import *\n'''--------------------------raspberry pi GPIO pin setup --------------------------- '''\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nvaccume_sense=13\nGPIO.setup(vaccume_sense, GPIO.IN)\nGV.ConnPresent=0\nGV.leakage = [[1, 1],[3, 14], [4, 21]]\nGV.switch = [[1, 3], [2, 9], [3, 16], [4, 23]]\nGV.led = [[1, 2], [3, 15], [4, 22]]\n\ndef Frl_on():\n single_write(64,1)\ndef Frl_off():\n single_write(64,0)\n\ndef Connector_Availability():\n GV.switch_status=[]\n for i in range (len(GV.switch)):\n\n Switch_status=single_read(GV.switch[i][1])\n x=[GV.switch[i][0],Switch_status]\n GV.switch_status.append(x)\n for j in range(len(GV.switch_status)):\n for k in range(len(GV.led)): \n if(GV.switch_status[j][0] == GV.led[k][0]):\n if(GV.switch_status[j][1]==0):\n single_write(GV.led[k][1],1)\n else:\n single_write(GV.led[k][1],0)\n\n## print(GV.switch_status) \n\ndef Leak_Test():\n waitTime=3\n x=len(GV.switch)\n y=0\n \n while(x!=y):\n \n Connector_Availability()\n## print(\"Y.............\",y)\n if(GV.switch_status[y][1]==1):\n print(\"Connector \" +str(GV.switch_status[y][0])+ \" present\")\n z=list((np.zeros(len(GV.leakage),dtype='i')))\n for j in range(len(GV.leakage)):\n \n if(GV.switch_status[y][0] in GV.leakage[j]):\n\n single_write(GV.leakage[j][1],1)\n single_write(GV.led[j][1],1)\n Frl_on()\n \n now=time.time()\n print(\"Sense\",GPIO.input(vaccume_sense))\n while (GPIO.input(vaccume_sense)==0 and (time.time()-now) None:\n self.value = value\n\n def __str__(self) :\n return self.value\n\n def __add__(self, obj) :\n # print('add')\n # print(self.value)\n # print(obj.value * 2)\n # print(f'{self.value} + {(obj.value * 2)}')\n return self.value + obj.value + (self.value + obj.value)//2\n\n def toRoman(self) :\n _num = {\"M\" : 1000, \"CM\" : 900, \"D\" : 500, \"CD\" : 400, \"C\" : 100, \"XC\" : 90, \"L\" : 50, \"XL\" : 40, \"X\" : 10, \"IX\" : 9, \"V\" : 5, \"IV\" : 4, \"I\" : 1}\n _roman = ''\n # print(self.value, _num.get('M'))\n # print(_num.items())\n\n _temp = self.value\n # print(_temp)\n\n # print('========')\n for i, j in _num.items() :\n # print(i, j)\n while _temp >= j :\n _temp -= j\n _roman += i\n return _roman\n \n\nif __name__ == '__main__' :\n print(' *** class MyInt ***')\n inp1, inp2 = [int(i) for i in input('Enter 2 number : ').split()]\n # print(inp1, inp2)\n # print(inp2*2)\n a = MyInt(inp1)\n b = MyInt(inp2)\n\n # print(a.toRoman())\n # print(b.toRoman())\n\n print(f'{inp1} convert to Roman : {a.toRoman()}')\n print(f'{inp2} convert to Roman : {b.toRoman()}')\n\n print(f'{inp1} + {inp2} = {a + b}')","repo_name":"Charonyx/DataStruct2564","sub_path":"Exam01 Python & Class/exam05_.py","file_name":"exam05_.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"529912553","text":"from __future__ import print_function\nfrom utils import dataUtil as du\nimport tensorflow as tf\nimport numpy as np\n\nExp = np.exp(1.0)\n\ndef combineList(tensorList, weights):\n\tt = tensorList[0] * weights[0]\n\tfor i in range(1, len(tensorList)):\n\t\tt += tensorList[i] * weights[i]\n\treturn t\n\ndef getLastEleWeights(tensorList):\n\tout = [tf.constant(0.0, dtype = tf.float32) for _ in range(len(tensorList))]\n\tout[-1] = tf.constant(1.0, dtype = tf.float32)\n\treturn out\n\ndef getAverageWeights(tensorList):\n\tl = len( tensorList )\n\tout = [ tf.constant( 1.0 / float(l), dtype = tf.float32 ) for _ in range(l) ]\n\treturn out\n\ndef getDecayingWeights(tensorList):\n\tl = len(tensorList)\n\tcur = 1.0\n\tout = [cur]\n\ttotal = cur\n\n\t# create list\n\tfor i in range(1, len(tensorList)):\n\t\tcur = cur * Exp\n\t\tout.append(cur)\n\t\ttotal += cur\n\n\t# normalize\n\tout = [ele/total for ele in out]\n\n\t# convert to tensor\n\tout = [tf.constant(ele, dtype = tf.float32) for ele in out]\n\treturn out\n\ndef getAttentionWeights(tensorList):\n\tpass\n\n# multiply a batch of matrix a tensor in the shape of [D, N, L]\n# with a transformation matrix [L, transformSize]\n# output a tensor of shape [D, N, transformSize]\ndef batchMatMul(a, b, D):\n\tout = []\n\tfor i in range(D):\n\t\tout.append(tf.matmul(a[i], b))\n\treturn tf.stack(out)\n\n# assume the input is of shape [D, 1]\n# add one more 0 to it (output shape [D+1, 1])\ndef addBias(x):\n\tx = tf.concat([x, tf.constant(np.array([[0.0]]), dtype = tf.float32)], axis = 0)\n\treturn x\n\n# get weight variavle of initial values from truncated normal\ndef tnVariable(shape, name = None):\n\tinitial = tf.truncated_normal(shape, stddev=0.1)\n\tif name is None:\n\t\treturn tf.Variable(initial)\n\telse:\n\t\treturn tf.get_variable(name = name, initializer = initial, dtype = tf.float32)\n\n# get bias variable of all initial value of 0.1\ndef biasVariable(shape, name = None):\n initial = tf.constant(0.1, shape=shape)\n if name is None:\n \treturn tf.Variable(initial)\n else:\n \treturn tf.get_variable(name = name, initializer = initial, dtype = tf.float32)\n\n# stride 1 convolution, H and W won't change\ndef conv2dStide1(x, W):\n\treturn tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n# 1 by 1 average ???\ndef avg1x1(x):\n\treturn tf.nn.avg_pool(x, ksize=[1, 1, 1, 1],\n strides=[1, 1, 1, 1], padding='SAME')\n\ndef calcProfit(action, logR, inBatch = False):\n\tif not inBatch:\n\t\treturn calcProfitNoBatch(action, logR)\n\telse:\n\t\treturn calcProfitBatch(action, logR)\n\ndef calcTransCost(action, prevAction, prevLogR, transCostParams, mRatio, inBatch = False):\n\tif not inBatch:\n\t\treturn calcTransCostNoBatch(action, prevAction, prevLogR, transCostParams, mRatio)\n\telse:\n\t\treturn calcTransCostBatch(action, prevAction, prevLogR, transCostParams, mRatio)\n\ndef calcProfitNoBatch(action, logR):\n\tprofit = tf.reduce_sum(tf.multiply(action[:-1], logR))\n\treturn profit\n\ndef calcProfitBatch(action, logR):\n\tprofit = tf.reduce_sum(tf.multiply(action[:,:-1], logR), axis = 1)\n\treturn tf.reduce_sum(profit)\n\n\ndef calcTransCostNoBatch(action, prevAction, prevLogR, transCostParams, mRatio):\n\tc = transCostParams['c']\n\tc0 = transCostParams['c0']\n\tpriceRatio = tf.exp(prevLogR)\n\tchanges = tf.abs(action[:-1] - mRatio * tf.multiply(priceRatio, prevAction[:-1]))\n\ttransactionCost = tf.reduce_sum( tf.multiply(c, changes) )\n\ttransactionCost += c0\n\treturn transactionCost\n\ndef calcTransCostBatch(action, 
prevAction, prevLogR, transCostParams, mRatio):\n\tc = transCostParams['c']\n\tc0 = transCostParams['c0']\n\tpriceRatio = tf.exp(prevLogR)\n\tchanges = tf.abs(action[:,:-1] - mRatio * tf.multiply(priceRatio, prevAction[:,:-1]))\n\ttransactionCost = tf.reduce_sum( tf.multiply(c, changes) , axis = 1)\n\ttransactionCost += c0\n\treturn tf.reduce_sum(transactionCost)\n\ndef train1epoch(returnTensor, prevReturnMatrix, nextReturnMatrix, curModel, sess, B = None):\n\treturn trainOrTest1Epoch(returnTensor, prevReturnMatrix, nextReturnMatrix, curModel, sess)\n\ndef test1epoch(returnTensor, prevReturnMatrix, nextReturnMatrix, curModel, sess, B = None):\n\treturn trainOrTest1Epoch(returnTensor, prevReturnMatrix, nextReturnMatrix, curModel, sess, training = False)\n\ndef trainOrTest1Epoch(returnTensor, prevReturnMatrix, nextReturnMatrix, curModel, sess, training = True):\t\n\ttotalIters = returnTensor.shape[0]\n\tprevLoss = 0.0\n\tD = len(prevReturnMatrix[0])\n\tprevA = du.getInitialAllocation(D)\n\tallActions = []\n\tallLosses = []\n\n\tfor t in range(totalIters):\n\t\tmRatio = du.loss2mRatio(prevLoss)\n\t\tinputs = {\n\t\t\t'X': returnTensor[t],\n\t\t\t'prevReturn': prevReturnMatrix[t],\n\t\t\t'nextReturn': nextReturnMatrix[t],\n\t\t\t'prevA': prevA,\n\t\t\t'mRatio': mRatio\n\t\t}\n\t\tif training:\n\t\t\tcurA, curLoss = curModel.train(inputs, sess)\n\t\telse:\n\t\t\tcurA, curLoss = curModel.get_action(inputs, sess)\n\t\tallActions.append(curA)\n\t\tallLosses.append(curLoss)\n\n\t\tprevLoss = curLoss\n\t\tprevA = curA\n\t\n\ttotalLoss = sum(allLosses)\n\tgrowthRates = map(lambda x: 1-x, allLosses)\n\n\treturn allActions, growthRates\n\n\t# add one more dimension for batching (which seems not very useful in this case)\t\ndef addNoneDim(shape):\n\treturn tuple([None] + list(shape))\n\ndef addNoneDimToAll(shapes):\n\treturn [addNoneDim(shape) for shape in shapes]\n\n###\n###\n### below are deprecated functions please don't use\ndef calcReturnWithPrice(prevA, prevS, nextS, mRatio, action, transCostParams, D, N):\n\t# return the total return\n\n\t# get parameters\n\tc = transCostParams['c']\n\tc0 = transCostParams['c0']\n\n\t# get holdings (stock holdings except for reserve) and stock prices\n\ttodayHoldings = tf.slice(action, [0, 0], [D, 1])\n\tyesterdayHoldings = tf.slice(prevA, [0, 0], [D, 1])\n\ttodayS = tf.slice(prevS, [0, N-1], [D, 1])\n\tyesterdayS = tf.slice(prevS, [0, N-2], [D, 1])\n\n\t# calc profit and transaction cost\n\tprofit = calcProfitWithPrice(nextS, todayS, todayHoldings)\n\ttransactionCost = calctransactionCostWithPrice(todayS, yesterdayS, todayHoldings, yesterdayHoldings, mRatio, c, c0)\n\n\t# total return\n\tR = profit - transactionCost\n\tprint('R', R.shape)\n\treturn R\n\n# profit\ndef calcProfitWithPrice(nextS, todayS, todayHoldings):\n\tstockChange = nextS - todayS\n\tprofit = tf.reduce_sum( tf.multiply( tf.div(stockChange, todayS), todayHoldings) )\n\treturn profit\n\n# transaction cost\ndef calctransactionCostWithPrice(todayS, yesterdayS, todayHoldings, yesterdayHoldings, mRatio, c, c0):\n\tholdingsChange = tf.abs( tf.div(todayHoldings, todayS) - mRatio * tf.div(yesterdayHoldings, yesterdayS) )\n\tholdingsChange = tf.multiply(holdingsChange, todayS)\n\ttransactionCostEach = tf.multiply(c, tf.multiply(holdingsChange, todayS))\n\ttransactionCostTotal = tf.reduce_sum(transactionCostEach) + c0\n\treturn 
transactionCostTotal\n\n\n\n","repo_name":"devilhtc/CS221-GroupProject","sub_path":"models/modelUtil.py","file_name":"modelUtil.py","file_ext":"py","file_size_in_byte":6598,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"4375824383","text":"\"\"\"\n# Exercícios\n\n### Antes de irmos para o desafio que apresentamos na última aula (que é bem mais complexo do que um exemplo simples) vamos resolver um exercício um pouco mais simples para treinar\n\n## 1. Cálculo do Percentual e da Lista de Vendedores\n\n- Queremos criar uma function que consiga identificar os vendedores que bateram uma meta, mas além disso, consigo já me dar como resposta o cálculo do % da lista de vendedores que bateu a meta (para eu não precisar calcular manualmente depois)\n- Essa function deve receber 2 informações como parâmetro: a meta e um dicionário com os vendedores e suas vendas. E me dar 2 respostas: uma lista com o nome dos vendedores que bateram a meta e o % de vendedores que bateu a meta.\n\n\"\"\"\n\nmeta = 10000\nvendas = {\n 'João': 15000,\n 'Julia': 27000,\n 'Marcus': 9900,\n 'Maria': 3750,\n 'Ana': 10300,\n 'Alon': 7870,\n}\n\n# crie sua function aqui\n\ndef batedores_meta(meta,vendas):\n lista_vendedores_meta=[]\n for chave in vendas:\n if vendas[chave]>=meta:\n lista_vendedores_meta.append(chave)\n\n return lista_vendedores_meta,len(lista_vendedores_meta)/len(vendas)\n\n\n\n# aplique sua function nas informações para ver se está funcionando\n\nprint(batedores_meta(meta,vendas)) # (['João', 'Julia', 'Ana'], 0.5)","repo_name":"jharbes/hashtagPython","sub_path":"014-funcoesEmPython/12-exerciciosDeFunction-parte3/exerciciosDeFunction-parte3.py","file_name":"exerciciosDeFunction-parte3.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31644122936","text":"\n#suma de vectores\nimport math\nvector11=int(input(\"x1:\"))\nvector12=int(input(\"y1:\"))\nvector13=int(input(\"z1:\"))\n#\nvector21=int(input(\"x2:\"))\nvector22=int(input(\"y2:\"))\nvector23=int(input(\"z2:\"))\ndef suma():\n\tsuma1=vector11+ vector21\n\tsuma2= vector12+ vector22\n\tsuma3= vector13+ vector23\n\tsumap=suma1,suma2,suma3\n\tprint(\"los numeros de la suma son:\"+ str(sumap))\n\nsuma()\n","repo_name":"KerlyNaranjo/Prueba","sub_path":"sumavector.py","file_name":"sumavector.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35845867965","text":"import subprocess #Work with computer processes and executables\nimport pathlib #Work with directory paths on local\nimport speech_recognition as sr #Speech recognition lib\nfrom docx import Document #Word Doc lib\nimport datetime as dt #Gather today's date\nimport pyowm #Weather API lib\nimport time #Grab current time\nimport requests #HTTP requests lib\nimport json #Parsing JSONs from Spotify API\nimport base64 #Convert to Base64 String for Spotify API passcode\nimport ast #Turns strings to dictionaries\nimport wikipedia #Wiki scraper package\nimport pyttsx3 #Text to speech response package\nimport playsound #Capable of playing sounds\nfrom newsapi import NewsApiClient #News headlines API lib\n\n#These Keys are stored on my local, get your own!\ntry:\n with open(\"../JeevesKeys.txt\", \"r\") as f:\n content = f.readlines()\n owAPI = content[0] #Insert your API key to Open Weather Maps here\n spotifyID = content[1] #Insert Spotify Client ID here\n spotifySecret = content[2] #Insert Spotify Client Secret here\n spotifyRefreshToken = content[3]\n spotifyRefreshHeader = content[4]\n newsAPIkey = content[5] #Insert your API key to NewsAPI here\n\n #Initialise the News API\n newsapi = NewsApiClient(newsAPIkey)\nexcept:\n print(\"Without a set of keys and secrets for the APIs in this project,\")\n print(\"including Spotify, NewsAPI and OWAPI, the functionality of Jeeves is diminished.\")\n print(\"Open the code and find where the keys are opened in a related .txt file,\")\n print(\"You will need to replace this with your own file, or create one. Please modify as necessary.\")\n\nclass TTS:\n engine = None\n rate = None\n\n #Set initialisation properties for the voice here\n def __init__(self):\n self.engine = pyttsx3.init() #Initialise text to speech functionality\n self.engine.setProperty('voice',self.engine.getProperty('voices')[7].id) #Set British Male voice\n self.engine.setProperty('rate',250)\n\n #Instance method for speaking aloud\n def speak(self, text):\n self.engine.say(text)\n self.engine.runAndWait()\n\nclass Commands:\n tts = TTS() #Define prefix for calling tts\n\n @classmethod\n def openApp(cls, app, rootname=\"none\"):\n \"\"\"\n Function which paths to Apps directory and opens an app using subprocess based on user input\n Inputs: app - the name (exactly as given in command line) of the app to open (str)\n rootname (optional) - if the App's name is different to its containing directory on the mac, rootname is the actual executable's name\n e.g. Microsoft Teams.app/Contents/MacOS/Teams where Microsoft Teams.app is the app and Teams is the rootname executable\n \"\"\"\n\n if rootname == \"none\":\n subprocess.Popen([\"../../../../Applications/{0}.app/Contents/MacOS/{0}\".format(app),\"-i\"])\n else:\n subprocess.Popen([\"../../../../Applications/{0}.app/Contents/MacOS/{1}\".format(app,rootname),\"-i\"])\n\n @classmethod\n def dictate(cls, notes):\n \"\"\"\n Function which takes dictated input and creates a word doc filled with the dictated text\n Input: notes - the recorded input to be transcribed (str)\n \"\"\"\n\n doc = Document() #Create doc using constructor\n today = dt.date.today() #Take date for heading\n\n #List to split paragraphs\n paras = []\n\n #Replacers to change ends of sentences to \". 
\"\n while \"end sentence\" in notes or \"and sentence\" in notes:\n loc = notes.find(\"end sentence\")\n if loc != -1:\n notes[loc+14].capitalize()\n loc2 = notes.find(\"and sentence\")\n if loc2 != -1:\n notes[loc2+14].capitalize()\n notes = notes.replace(\" end sentence\", \".\")\n notes = notes.replace(\" and sentence\", \".\")\n\n #Replacers allow user to escape the words 'paragraph, paragraphs' if included in notes\n while \"escape paragraph\" in notes:\n notes = notes.replace(\"escape paragraph\", \"pescape\")\n while \"paragraphs\" in notes:\n notes = notes.replace(\"paragraphs\", \"psescape\")\n\n while \"paragraph\" in notes:\n loc = notes.index(\"paragraph\")\n current = notes[:loc] #Split into next paragraph at keyword \"paragraph\"\n\n while \"pescape\" in current:\n current = current.replace(\"pescape\",\"paragraph\")\n while \"psescape\" in current:\n current = current.replace(\"psescape\", \"paragraphs\")\n\n paras.append(current)\n notes = notes[loc+10:] #Cut appended paragraph from notes\n\n while \"pescape\" in notes:\n notes = notes.replace(\"pescape\",\"paragraph\")\n while \"psescape\" in notes:\n notes = notes.replace(\"psescape\", \"paragraphs\")\n paras.append(notes)\n\n #Check if a similarly named doc already exists, save to new\n notestoday = []\n for i in pathlib.Path(\"Dictations\").iterdir():\n if str(today) in str(i):\n notestoday.append(str(i))\n name = \"Dictations/{0}-{1}.docx\".format(today,len(notestoday)+1)\n\n doc.add_heading(\"Notes for {0} (Entry {1})\".format(today,len(notestoday)+1), 0)\n\n for i in paras:\n doc.add_paragraph(i) #Add notes to document\n \n doc.save(name)\n print(\"All done sir! You'll find it under the name {0}.\".format(name))\n Commands.tts.speak(\"All done sir! You'll find it under the name {0}.\".format(name))\n\n @classmethod\n def weather(cls):\n \"\"\"\n Function that returns the weather data for the current time grabbed from the OpenWeatherMap API\n \"\"\"\n\n owm = pyowm.OWM(owAPI)\n observation = owm.weather_at_place(\"London,UK\") #Grab weather object from API\n\n w = observation.get_weather() #Pull weather from object\n temp = w.get_temperature(\"celsius\") #Temperature dict in celsius\n wind = w.get_wind(\"miles_hour\") #Wind dict\n rain = w.get_rain() #Rain dict\n\n print(\"The temperature is currently {0}˚C, ranging between {1}˚C and {2}˚C.\".format(temp[\"temp\"],temp[\"temp_max\"],temp[\"temp_min\"]))\n Commands.tts.speak(\"The temperature is currently {0}˚C, ranging between {1}˚C and {2}˚C.\".format(temp[\"temp\"],temp[\"temp_max\"],temp[\"temp_min\"]))\n\n if len(rain) == 0:\n print(\"There's no rain at the moment.\")\n Commands.tts.speak(\"There's no rain at the moment.\")\n\n\n print(\"The wind is currently blowing at {0:0.2f}mph on bearing {1}˚.\".format(wind[\"speed\"],wind[\"deg\"]))\n Commands.tts.speak(\"The wind is currently blowing at {0:0.2f}mph on bearing {1}˚.\".format(wind[\"speed\"],wind[\"deg\"]))\n\n @classmethod\n def forecast(cls, hr):\n \"\"\"\n Function that returns a weather prediction by grabbing data from the OpenWeatherMap API\n Input: hr - the hour to forecast for, must be 1, 2 or 3 (three hours max) (int)\n \"\"\"\n\n if hr > 3:\n print(\"Sorry sir, I can only forecast over the next three hours.\")\n Commands.tts.speak(\"Sorry sir, I can only forecast over the next three hours.\")\n\n return None\n\n owm = pyowm.OWM(owAPI)\n forecast = owm.three_hours_forecast(\"London,UK\") #Grab weather forecast object from API\n\n #Format time to ISO8601-formatted string to grab correct data\n today = 
dt.date.today()\n hour = time.gmtime(time.time()+3600+(hr*3600)).tm_hour\n minute = time.gmtime(time.time()+3600+(hr*3600)).tm_min\n second = time.gmtime(time.time()+3600+(hr*3600)).tm_sec\n ftime = \"{0} {1}:{2}:{3}+00\".format(today,hour,minute,second)\n\n fw = forecast.get_weather_at(ftime) #Pull weather from forecast object\n ftemp = fw.get_temperature(\"celsius\") #Temperature dict in celsius\n fwind = fw.get_wind(\"miles_hour\") #Wind dict in mph\n frain = fw.get_rain() #Rain dict\n\n print(\"The temperature in {0} hours' time will range between {1}˚C and {2}˚C.\".format(hr,ftemp[\"temp_min\"],ftemp[\"temp_max\"]))\n Commands.tts.speak(\"The temperature in {0} hours' time will range between {1}˚C and {2}˚C.\".format(hr,ftemp[\"temp_min\"],ftemp[\"temp_max\"]))\n \n if hr == 1: \n if \"1h\" not in frain: #No rain predicted in next hour\n print(\"No rain is forecast for the next hour.\")\n Commands.tts.speak(\"No rain is forecast for the next hour.\")\n \n else:\n print(\"{0}mm of rain is expected over the next hour.\".format(frain[\"1h\"]))\n Commands.tts.speak(\"{0}mm of rain is expected over the next hour.\".format(frain[\"1h\"]))\n \n\n elif hr == 2:\n if \"2h\" not in frain:\n print(\"No rain is forecast for the next two hours.\")\n Commands.tts.speak(\"No rain is forecast for the next two hours.\")\n \n else:\n print(\"{0}mm of rain is expected over the next two hours.\".format(frain[\"2h\"]))\n Commands.tts.speak(\"{0}mm of rain is expected over the next two hours.\".format(frain[\"2h\"]))\n \n\n elif hr == 3:\n if \"3h\" not in frain:\n print(\"No rain is forecast for the next three hours.\")\n Commands.tts.speak(\"No rain is forecast for the next three hours.\")\n \n else:\n print(\"{0}mm of rain is expected over the next three hours.\".format(frain[\"3h\"]))\n Commands.tts.speak(\"{0}mm of rain is expected over the next three hours.\".format(frain[\"3h\"]))\n \n\n\n print(\"There will be a {0:0.2f}mph wind on bearing {1}˚.\".format(fwind[\"speed\"],fwind[\"deg\"]))\n Commands.tts.speak(\"There will be a {0:0.2f}mph wind on bearing {1}˚.\".format(fwind[\"speed\"],fwind[\"deg\"]))\n\n files = [] #Necessary class variable for completelist/filelist\n\n @classmethod\n def completelist(cls, d):\n\n for i in pathlib.Path(d).iterdir():\n if (\".DS_Store\" in str(i) or \"__pycache__\" in str(i) or \".ipynb_checkpoints\" in str(i)):\n continue\n else:\n if str(i) not in Commands.files:\n Commands.files.append(str(i))\n else:\n pass\n try:\n Commands.completelist(i)\n except NotADirectoryError:\n continue\n \n f = [] #Local variable to return so class variable can be cleared\n for i in Commands.files:\n f.append(i)\n\n return f\n\n @classmethod\n def filelist(cls, d, search):\n \n files = Commands.completelist(d)\n Commands.files = [] #Clear files class variable for re-use\n results = []\n \n for i in files:\n if search in i:\n results.append(i)\n else:\n continue\n \n if len(results) == 0:\n print(\"I'm afraid I couldn't find any files with that name in sir. Sorry!\")\n Commands.tts.speak(\"I'm afraid I couldn't find any files with that name in sir. 
Sorry!\")\n\n else:\n print(\"I've found the following list of files that might fit your purpose sir:\\n{0}\".format(results))\n Commands.tts.speak(\"I've found the following list of files that might fit your purpose sir\")\n\n access = \"\" #Necessary class variable taking place of Spotify API Token\n\n @classmethod\n def refreshSpotifyToken(cls, sRT, sRH):\n \"\"\"\n Function that pipes to command line to curl request a new spotify token\n Inputs: sRT - spotifyRefreshToken (str)\n sRH - spotifyRefreshHeader (str)\n Defines 'access' class variable, currently active auth token for Spotify API\n \"\"\"\n #Pipe subprocess command to terminal, curl request refresh token\n get_token = subprocess.Popen(\n [\"curl\", \"-H\", sRH, \"-d\", \"grant_type=refresh_token\", \"-d\", \"refresh_token=\"+sRT, \"https://accounts.spotify.com/api/token\"],\n stdout = subprocess.PIPE,\n )\n out = get_token.communicate() #Returns json object of Spotify API data\n\n #Stringify byte iformation to slice\n out = str(out)\n opening = out.find(\"{\") #Start of object\n closing = out.find(\"}\") #Close of object\n out = out[opening:closing+1]\n\n out = ast.literal_eval(out) #Turn string into dictionary\n Commands.access = out[\"access_token\"] #Access token for Spotify data\n\n @classmethod\n def define(cls, topic, sentences=4):\n try:\n wiki = wikipedia.summary(topic, sentences=sentences)\n print(\"The topic you searched for came out as '{0}'\".format(topic))\n print(\"Here's what I found sir:\")\n Commands.tts.speak(\"The topic you searched for came out as '{0}'. Here's what I found sir.\".format(topic))\n print(wiki)\n Commands.tts.speak(wiki)\n except:\n print(\"Sorry sir, I ran into some errors trying to fetch that. Feel free to try again.\")\n Commands.tts.speak(\"Sorry sir, I ran into some errors trying to fetch that. Feel free to try again.\")\n\n @classmethod\n def episode(cls):\n #Fetch correct episode number from separate file\n #As file updates in real time but Jeeves cannot, must be saved separately\n with open(\"episodecounter.py\") as f:\n line = str(f.readlines()[0])\n starTrekEpisode = int(line[18:]) #Trim for number\n\n #Open csv with episodes\n with open(\"startrekepisodes.csv\", encoding=\"utf-8\") as f:\n line = f.readlines()[starTrekEpisode] #Read line attributed to correct episode\n data = []\n while len(str(line)) > 0: #Separate line string into list of entries\n loc = line.find(\",\")\n if loc != -1:\n entry = line[:loc]\n data.append(entry)\n line = line[loc+1:]\n else:\n data.append(line)\n line = \"\"\n\n #Current episode data\n season = data[2]\n episode = data[3]\n episode = \"Season \" + episode #Add to string for text to speech\n episode = episode.replace(\"x\",\" Episode \")\n\n #Say current episode\n print(\"The next episode to watch is {0}, {1}.\".format(season,episode))\n Commands.tts.speak(\"The next episode to watch is {0}, {1}.\".format(season,episode))\n\n #Add one to episode (moving to next episode) and overwrite file\n content = [\"starTrekEpisode = {n}\".format(n=starTrekEpisode+1)]\n with open(\"episodecounter.py\", \"w\") as f:\n f.write(content[0])\n\n @classmethod\n def cast(cls, topic):\n text = wikipedia.WikipediaPage(topic).html() #Grab html from wikipedia\n\n #Trim html tags for list of starring\n text = text[text.index(\"Starring\"):]\n openlist = text.index(\"\")\n closelist = text.index(\"
\")\n text = text[openlist:closelist]\n\n #New list to fill with cast names\n cast = []\n while len(text) > 0:\n openitem = text.find('\">') #Trim list tags to get names\n closeitem = text.find(\"\")\n if openitem != -1:\n name = text[openitem+2:closeitem]\n cast.append(name)\n else:\n text = \"\"\n text = text[closeitem+4:]\n\n #Output to terminal and speak aloud\n print(\"The cast of {0} includes\".format(topic))\n Commands.tts.speak(\"The cast of {0} includes\".format(topic))\n print(cast)\n for i in cast:\n Commands.tts.speak(i)\n\n @classmethod\n def news(cls):\n top_headlines = newsapi.get_top_headlines(language='en',sources='bbc-news')\n print(\"Here are the headlines from BBC News:\")\n Commands.tts.speak(\"Here are the headlines from BBC News.\")\n for i in top_headlines[\"articles\"]:\n print(\"-> {0}\".format(i[\"title\"]))\n Commands.tts.speak(i[\"title\"])\n\ndef Jeeves():\n tts = TTS() #Define prefix for calling tts\n\n r = sr.Recognizer()\n mic = sr.Microphone()\n\n attending = True\n\n \"\"\"if time.gmtime().tm_hour > 11: #Check it's afternoon\n print(\"Good afternoon sir, what can I do for you today?\")\n tts.speak(\"Good afternoon sir, what can I do for you today?\")\n else:\n print(\"Good morning sir, what can I do for you today?\")\n tts.speak(\"Good morning sir, what can I do for you today?\")\"\"\"\n\n while attending == True:\n\n playsound.playsound(\"jeeves_sound.mp3\")\n\n with mic as source:\n audio = r.listen(source, phrase_time_limit=7.0)\n\n prompt = r.recognize_google(audio)\n print(\"> {0}\".format(prompt))\n\n #Remove pleasantries\n while \"please\" in prompt:\n prompt = prompt[:prompt.index(\"please\")]\n while \"Jeeves\" in prompt:\n prompt = prompt[:prompt.index(\"Jeeves\")]\n try:\n while prompt[-1] == \" \": #Remove possible extra whitespace on end to trim for command/content\n prompt = prompt[:-1]\n except:\n pass\n\n if \"weather\" in prompt:\n command = \"weather\"\n elif \"show me files\" in prompt:\n command = \"filelist\"\n elif \"song\" in prompt or \"music\" in prompt:\n command = \"music\"\n elif \"summary\" in prompt or \"summarize\" in prompt or \"summarise\" in prompt:\n command = \"define\"\n elif \"shut\" in prompt and \"down\" in prompt:\n command = \"shutdown\"\n elif \"star trek\" in prompt or \"Star Trek\" in prompt or \"star Trek\" in prompt or \"Star trek\" in prompt:\n command = \"episode\"\n elif \"cast\" in prompt:\n command = \"cast\"\n elif \"headlines\" in prompt or \"headline\" in prompt or \"news\" in prompt:\n command = \"news\"\n else:\n try:\n if prompt[:10] == \"take notes\":\n command = \"take notes\"\n else:\n command = prompt[:prompt.index(\" \")]\n except:\n command = prompt\n \n if command == \"open\":\n app = prompt[prompt.index(command)+5:]\n\n print(\"Very well sir, I'll try and open {0} for you now...\".format(app))\n tts.speak(\"Very well sir, I'll try and open {0} for you now...\".format(app))\n\n try:\n if app == \"Microsoft teams\": #Specific fix for unrecognized name\n app = \"Microsoft Teams\"\n Commands.openApp(app,\"Teams\")\n else:\n Commands.openApp(app)\n except FileNotFoundError:\n print(\"I'm afraid I couldn't find the file sir. Would you like to try again?\")\n tts.speak(\"I'm afraid I couldn't find the file sir. 
Would you like to try again?\")\n\n\n elif command == \"take notes\":\n r.pause_threshold = 4 #Max time in seconds to allow for pauses without stopping recording\n\n print(\"Alright then sir, go ahead, my pen is ready.\")\n tts.speak(\"Alright then sir, go ahead, my pen is ready.\")\n\n with mic as source:\n audio = r.listen(source)\n\n try:\n notes = r.recognize_google(audio)\n Commands.dictate(notes)\n except:\n print(\"Sorry sir, I didn't quite gather anything there. Try and run me again.\")\n tts.speak(\"Sorry sir, I didn't quite gather anything there. Try and run me again.\")\n\n\n elif command == \"weather\":\n if \"1-hour\" in prompt or \"an hour\" in prompt:\n try:\n Commands.forecast(1)\n except:\n print(\"Sorry sir, I couldn't fetch any data for that time. Perhaps try a different forecast.\")\n tts.speak(\"Sorry sir, I couldn't fetch any data for that time. Perhaps try a different forecast.\")\n \n elif \"2 hours\" in prompt:\n try:\n Commands.forecast(2)\n except:\n print(\"Sorry sir, I couldn't fetch any data for that time. Perhaps try a different forecast.\")\n tts.speak(\"Sorry sir, I couldn't fetch any data for that time. Perhaps try a different forecast.\")\n \n elif \"3 hours\" in prompt:\n try:\n Commands.forecast(3)\n except:\n print(\"Sorry sir, I couldn't fetch any data for that time. Perhaps try a different forecast.\")\n tts.speak(\"Sorry sir, I couldn't fetch any data for that time. Perhaps try a different forecast.\")\n \n else:\n try:\n Commands.weather()\n except:\n print(\"Sorry sir, I couldn't fetch any data for that time. Perhaps try a different forecast.\")\n tts.speak(\"Sorry sir, I couldn't fetch any data for that time. Perhaps try a different forecast.\")\n \n\n elif command == \"filelist\":\n if \"under the name\" in prompt: #Correct way for this program to refer to the name of the files\n loc = prompt.index(\"under the name\")\n name = prompt[loc+15:]\n\n try:\n Commands.filelist(\"..\",name)\n except:\n print(\"I've run into a problem with the name you gave me sir. Please try again.\")\n tts.speak(\"I've run into a problem with the name you gave me sir. Please try again.\")\n \n else:\n print(\"Sorry sir, I need you to phrase it as 'Show me files under the name...'.\")\n tts.speak(\"Sorry sir, I need you to phrase it as 'Show me files under the name...'.\")\n\n \n elif command == \"repeat\":\n while \"this for me\" in prompt:\n prompt = prompt.replace(\"this for me \", \"\")\n phrase = prompt[7:]\n print(\"I believe sir, you said '{0}'. Was I correct?\".format(phrase))\n tts.speak(\"I believe sir, you said '{0}'. 
Was I correct?\".format(phrase))\n\n elif command == \"music\":\n #Check Spotify API token is valid\n while str(requests.get(\"https://api.spotify.com/v1/playlists/566CNyS94IjywKSys66FJv\", headers = {\"Authorization\": \"Bearer \"+Commands.access})) != \"\":\n Commands.refreshSpotifyToken(spotifyRefreshToken,spotifyRefreshHeader)\n print(\"> Spotify Token Refreshed.\")\n continue\n\n if \"skip\" in prompt or \"next\" in prompt:\n #Next Song Post Request\n requests.post(\"https://api.spotify.com/v1/me/player/next\",headers = {\"Authorization\": \"Bearer \"+Commands.access})\n\n if \"previous\" in prompt or \"last\" in prompt:\n #Previous Song Post Request\n requests.post(\"https://api.spotify.com/v1/me/player/previous\",headers = {\"Authorization\": \"Bearer \"+Commands.access})\n \n elif \"pause\" in prompt:\n #Pause Song Put Request\n requests.put(\"https://api.spotify.com/v1/me/player/pause\", headers = {\"Authorization\": \"Bearer \"+Commands.access})\n \n elif \"play\" in prompt:\n #Play Song Put Request\n requests.put(\"https://api.spotify.com/v1/me/player/play\", headers = {\"Authorization\": \"Bearer \"+Commands.access})\n \n elif \"half volume\" in prompt or \"50%\" in prompt:\n #Half volume Put Request\n requests.put(\"https://api.spotify.com/v1/me/player/volume\", headers = {\"Authorization\": \"Bearer \"+Commands.access}, params = {\"volume_percent\":50})\n \n elif \"full volume\" in prompt or \"100%\" in prompt:\n #Full volume Put Request\n requests.put(\"https://api.spotify.com/v1/me/player/volume\", headers = {\"Authorization\": \"Bearer \"+Commands.access}, params = {\"volume_percent\":100})\n \n elif \"shuffle\" in prompt:\n #Shuffle music Put Request\n requests.put(\"https://api.spotify.com/v1/me/player/shuffle\",headers = {\"Authorization\": \"Bearer \"+Commands.access}, params = {\"state\":\"true\"})\n\n elif command == \"define\":\n if \"summarize\" in prompt:\n topic = prompt[prompt.index(\"summarize\")+10:]\n elif \"summarise\" in prompt:\n topic = prompt[prompt.index(\"summarise\")+10:]\n elif \"summary\" in prompt:\n topic = prompt[prompt.index(\"summary\")+11:]\n\n if \"sentences\" in prompt:\n if \"three\" in prompt or \"3\" in prompt:\n s = 3\n elif \"two\" in prompt or \"2\" in prompt:\n s = 2\n elif \"one\" in prompt or \"1\" in prompt:\n s = 1\n elif \"sentence\" in prompt:\n if \"three\" in prompt or \"3\" in prompt:\n s = 3\n elif \"two\" in prompt or \"2\" in prompt:\n s = 2\n elif \"one\" in prompt or \"1\" in prompt:\n s = 1\n else:\n try:\n s = int(prompt[prompt.index(\"sentence\")-2])\n except:\n pass\n\n topic[0].capitalize()\n\n try:\n Commands.define(topic,s)\n except:\n Commands.define(topic)\n\n elif command == \"shutdown\":\n print(\"Alright then sir, enjoy the rest of your day!\")\n tts.speak(\"Alright then sir, enjoy the rest of your day!\")\n time.sleep(1)\n playsound.playsound(\"jeeves_shutdown.mp3\")\n attending = False\n break\n\n elif command == \"episode\":\n print(\"I shall check for you now sir.\")\n tts.speak(\"I shall check for you now sir.\")\n try:\n Commands.episode()\n except:\n print(\"Sorry sir, I seem to have run into some issues. Perhaps you'll have to try again.\")\n tts.speak(\"Sorry sir, I seem to have run into some issues. 
Perhaps you'll have to try again.\")\n\n elif command == \"cast\":\n name = prompt[prompt.find(\"cast\")+8:]\n try:\n Commands.cast(name)\n except:\n print(\"Sorry sir, I was unable to fetch a cast for {0}.\".format(name))\n tts.speak(\"Sorry sir, I was unable to fetch a cast for {0}.\".format(name))\n\n elif command == \"news\":\n try:\n Commands.news()\n except:\n tts.speak(\"Sorry sir, I'm unable find any headlines at the moment. Perhaps try again later.\")\n\n else:\n print(\"I'm sorry sir, did you say '{0}'?\".format(command))\n print(\"Please go ahead and run me again, my hearing might be off.\")\n tts.speak(\"I'm sorry sir, did you say '{0}'? Please go ahead and run me again, my hearing might be off.\".format(command))\n \n caught = \"\" #Sound caught every two seconds on sleep cycle\n i = 0\n while i < 600:\n try:\n with mic as source: #Listen every two seconds\n audio = r.listen(source, timeout=2.5, phrase_time_limit=2.5)\n except:\n continue\n try:\n caught = r.recognize_google(audio) #Try to analyse\n except:\n pass\n \n try: #If Jeeves is called in the two second cycle, trigger callback\n if \"Jeeves\" in caught or \"geeves\" in caught or \"Jesus\" in caught or \"juice\" in caught or \"cheese\" in caught:\n callback = True\n break\n elif (\"skip\" in caught or \"next\" in caught) and (\"music\" in caught or \"song\" in caught):\n while str(requests.get(\"https://api.spotify.com/v1/playlists/566CNyS94IjywKSys66FJv\", headers = {\"Authorization\": \"Bearer \"+Commands.access})) != \"\":\n Commands.refreshSpotifyToken(spotifyRefreshToken,spotifyRefreshHeader)\n print(\"> Spotify Token Refreshed.\")\n continue\n #Next Song Post Request\n requests.post(\"https://api.spotify.com/v1/me/player/next\",headers = {\"Authorization\": \"Bearer \"+Commands.access})\n caught = \"\"\n pass\n except:\n pass\n i += 1\n\n #If callback triggered, restart Jeeves loop\n if callback == True:\n continue\n \n print(\"I'm shutting down now sir.\")\n tts.speak(\"I'm shutting down now sir.\")\n attending = False","repo_name":"ewanmiles/SoloProjects","sub_path":"Jeeves/Jeeves.py","file_name":"Jeeves.py","file_ext":"py","file_size_in_byte":29075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32261346235","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nclass MyMNISTDataset(Dataset):\n def __init__(self, npz_file, transform=None):\n data = np.load(npz_file)\n self.images = data['images']\n self.labels = data['labels']\n self.transform = transform\n\n def __len__(self):\n return len(self.labels)\n\n def __getitem__(self, idx):\n image = self.images[idx]\n label = self.labels[idx]\n\n if self.transform:\n image = self.transform(image)\n\n return image, label\n\n\n# 設定超參數\n\nbatch_size = 600\nlearning_rate = 0.0001\nnum_epochs = 100\n\n# 檢查 CUDA 是否可用\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device('cuda' if use_cuda else 'cpu')\nprint('Using device:', device)\n\n# 載入 MNIST 數據集\ntrain_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)\ntest_dataset = dsets.MNIST(root='./data', train=False, transform=transforms.ToTensor())\n\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\ntest_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n\n# 定義模型\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.fc = nn.Linear(7 * 7 * 64, 10)\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n return out\n\ndef main():\n model = CNN()\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n if os.path.exists('mnist_cnn.state_dict'):\n loaded_model = CNN().to(device)\n model_weights = torch.load('mnist_cnn.state_dict')\n \n # 移除 \"module.\" 前綴\n new_model_weights = {k.replace(\"module.\", \"\"): v for k, v in model_weights.items()}\n loaded_model.load_state_dict(new_model_weights)\n model = loaded_model\n \n # 檢查可用 GPU 數量\n if torch.cuda.device_count() > 1:\n print(f'Using {torch.cuda.device_count()} GPUs')\n # model = nn.DataParallel(model)\n \n model = model.to(device)\n \n # 損失函數和優化器\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n \n # 訓練模型\n model.train()\n for epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n images, labels = images.to(device), labels.to(device)\n outputs = model(images)\n loss = criterion(outputs, labels)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if (i + 1) % 100 == 0:\n print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(train_loader)}], Loss: {loss.item():.8f}')\n \n # 評估模型\n with torch.no_grad():\n model.eval()\n correct = 0\n total = 0\n for images, labels in test_loader:\n images, labels = images.to(device), labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n \n print(f'Test Accuracy: {100 * correct / total:.2f}%')\n torch.save(model.state_dict(), 'mnist_cnn.state_dict')\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"wwin3286tw/MNIST","sub_path":"gpu_train.py","file_name":"gpu_train.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42224589688","text":"import requests\nimport json\nfrom utils import log_model\n\nlog = log_model.OperationLog()\n\nclass OprationCMDB:\n\n def __init__(self):\n pass\n\n\n # def RouterSysInformationGET(self):\n # try:\n # log = LogOperation.OperationLog()\n # data = {\n # \"objectClass\":1043,\n # \"keyword\":\"1043\",\n # \"mode\":0,\n # \"columnNames\":[\"SYSNAME1\",\"STATUS\"],\n # \"conditions\":[{\"fieldName\":\"OBJECT_CLASS\",\"fieldValue\":\"1050\",\"mode\":2}]\n # }\n # data = json.dumps(data, ensure_ascii=False,indent=4)\n # url = 'http://10.47.174.136:9018/bomc-resource-api/cmdb/data/search'\n # response = requests.post(url, data=data)\n # log.logPrint(str(response))\n # except Exception as e:\n # log = LogOperation.OperationLog()\n # log.logPrint(e)\n # return e\n #\n # def SwitchSysInformationGET(self):\n # try:\n # log = LogOperation.OperationLog()\n # data = {\n # \"objectClass\":1043,\n # \"keyword\":\"1043\",\n # \"mode\":0,\n # \"columnNames\":[\"SYSNAME1\",\"STATUS\"],\n # \"conditions\":[{\"fieldName\":\"OBJECT_CLASS\",\"fieldValue\":\"1053\",\"mode\":2}]\n # }\n # data = json.dumps(data, ensure_ascii=False,indent=4)\n # url = 'http://10.47.174.136:9018/bomc-resource-api/cmdb/data/search'\n # response = requests.post(url, data=data)\n # log.logPrint(str(response))\n # except Exception as e:\n # log = LogOperation.OperationLog()\n # log.logPrint(e)\n # return e\n #\n # def RouterInformationGET(self):\n # try:\n # log = LogOperation.OperationLog()\n # data = {\n # \"objectClass\": 1054,\n # \"keyword\": \"1054\",\n # \"mode\": 0,\n # \"columnNames\": [\"SYSNAME1\", \"STATUS\"],\n # \"conditions\": [{\"fieldName\": \"OBJECT_CLASS\", \"fieldValue\": \"1054\", \"mode\": 2}]\n # }\n # data = json.dumps(data, ensure_ascii=False, indent=4)\n # url = 'http://10.47.174.136:9018/bomc-resource-api/cmdb/data/search'\n # response = requests.post(url, data=data)\n # log.logPrint(str(response))\n # except Exception as e:\n # log = LogOperation.OperationLog()\n # log.logPrint(e)\n # return e\n #\n # def InterfaceInformationGET(self):\n # try:\n # log = LogOperation.OperationLog()\n # data = {\n # \"objectClass\": 1045,\n # \"keyword\": \"1045\",\n # \"mode\": 0,\n # \"columnNames\": [\"SYSNAME1\", \"STATUS\"],\n # \"conditions\": [{\"fieldName\": \"OBJECT_CLASS\", \"fieldValue\": \"1045\", \"mode\": 2}]\n # }\n # data = json.dumps(data, ensure_ascii=False, indent=4)\n # url = 'http://10.47.174.136:9018/bomc-resource-api/cmdb/data/search'\n # response = requests.post(url, data=data)\n # log.logPrint(str(response))\n # except Exception as e:\n # log = LogOperation.OperationLog()\n # log.logPrint(e)\n # return e\n #\n # def RelatinonshipInformationGET(self):\n # try:\n # log = LogOperation.OperationLog()\n # data = {\n # \"objectClass\": 1046,\n # \"keyword\": \"1046\",\n # \"mode\": 0,\n # \"columnNames\": [\"SYSNAME1\", \"STATUS\"],\n # \"conditions\": [{\"fieldName\": \"OBJECT_CLASS\", \"fieldValue\": \"1046\", \"mode\": 2}]\n # }\n # data = json.dumps(data, ensure_ascii=False, indent=4)\n # url = 'http://10.47.174.136:9018/bomc-resource-api/cmdb/data/search'\n # response = requests.post(url, data=data)\n # log.logPrint(str(response))\n # except Exception as e:\n # log = LogOperation.OperationLog()\n # log.logPrint(e)\n # return e\n @log.classFuncDetail2Log('DEBUG')\n def RouterSysInformationPOST(self,dataDict,dataList):\n temp = {}\n temp['OBJECT_CLASS']= '1054'\n temp['SEARCHCODE']=''\n temp['NAME']=dataDict['sysName']\n temp['STATUS']='使用中'\n temp['SYSNAME']='网络管理SNMP'\n 
temp['OPERATOR']='李至恒'\n temp['NAME'] = dataDict['sysSNID']\n temp['SYSDESCR'] = dataDict['sysDescr']\n temp['SYSOBJECTID'] = dataDict['sysObjectID']\n temp['SYSUPTIME'] = dataDict['sysUpTime']\n temp['SYSCONTACT'] = dataDict['sysContact']\n temp['SYSNAME1'] = dataDict['sysName']\n temp['SYSLOCATION'] = dataDict['sysLocation']\n temp['SYSIFNUMBER'] = dataDict['sysIfNumber']\n temp['SYSFORWADING'] = dataDict['sysForwarding']\n temp['SYSSERVICES'] = dataDict['sysSevices']\n dataList.append(temp)\n return dataList\n\n @log.classFuncDetail2Log('DEBUG')\n def SwtichSysInformationPOST(self,dataDict,dataList):\n temp ={}\n temp['OBJECT_CLASS']= '1043'\n temp['SEARCHCODE']=''\n temp['NAME']=dataDict['sysName']\n temp['STATUS']='使用中'\n temp['SYSNAME']='网络管理SNMP'\n temp['OPERATOR']='李至恒'\n temp['NAME'] = dataDict['sysSNID']\n temp['SYSSNID'] = dataDict['sysSNID']\n temp['SYSDESCR'] = dataDict['sysDescr']\n temp['SYSOBJECTID'] = dataDict['sysObjectID']\n temp['SYSUPTIME'] = dataDict['sysUpTime']\n temp['SYSCONTACT'] = dataDict['sysContact']\n temp['SYSNAME1'] = dataDict['sysName']\n temp['SYSLOCATION'] = dataDict['sysLocation']\n temp['SYSIFNUMBER'] = dataDict['sysIfNumber']\n temp['SYSFORWADING'] = dataDict['sysForwarding']\n temp['SYSSERVICES'] = dataDict['sysSevices']\n dataList.append(temp)\n return dataList\n\n @log.classFuncDetail2Log('DEBUG')\n def RouterInformationPOST(self,dataDict,dataList,sysName):\n for row in dataDict:\n temp = {}\n temp['OBJECT_CLASS']= '1045'\n temp['SEARCHCODE']=''\n temp['STATUS']='使用中'\n temp['SYSNAME']='网络管理SNMP'\n temp['OPERATOR']='李至恒'\n temp['NAME'] = dataDict[row]['routerUniqueID']\n temp['ROUTERUNIQUEID'] = dataDict[row]['routerUniqueID']\n temp['SYSNAME1'] = sysName\n temp['ROUTERDEST'] = dataDict[row]['routerDest']\n temp['ROUTERIFINDEX'] = dataDict[row]['routerIfIndex']\n temp['ROUTERIFIP'] = dataDict[row]['routerIfIP']\n temp['ROUTERNEXTHOP'] = dataDict[row]['routerNextHop']\n temp['ROUTERMASK'] = dataDict[row]['routerMask']\n dataList.append(temp)\n return dataList\n\n @log.classFuncDetail2Log('DEBUG')\n def InterfaceInformationPOST(self,dataDict,dataList,sysName):\n for row in dataDict:\n temp = {}\n temp['OBJECT_CLASS']= '1050'\n temp['SEARCHCODE']=''\n temp['STATUS']='使用中'\n temp['SYSNAME']='网络管理SNMP'\n temp['OPERATOR']='李至恒'\n temp['NAME'] = dataDict[row]['interfaceUniqueID']\n temp['INTERFACEUNIQUEID'] = dataDict[row]['interfaceUniqueID']\n temp['SYSNAME1'] = sysName\n temp['INTERFACEID'] = dataDict[row]['interfaceID']\n temp['INTERFACENAME'] = dataDict[row]['interfaceName']\n temp['INTERFACEIP'] = dataDict[row]['interfaceIP']\n temp['INTERFACENETMASK'] = dataDict[row]['interfaceNetmask']\n temp['INTERFACEADMINSTATUS'] = dataDict[row]['interfaceAdminStatus']\n temp['INTERFACEOPERSTATUS'] = dataDict[row]['interfaceOperStatus']\n temp['INTLASTCHANGE'] = dataDict[row]['interfaceLastChange']\n temp['INTERFACEPHYSADDRESS'] = dataDict[row]['interfacePhysAddress']\n temp['INTERFACESDESC'] = dataDict[row]['interfaceDesc']\n dataList.append(temp)\n return dataList\n\n @log.classFuncDetail2Log('DEBUG')\n def RelationshipInformationPOST(self,dataDict,dataList,sysName):\n for row in dataDict:\n temp = {}\n temp['OBJECT_CLASS']= '1046'\n temp['SEARCHCODE']=''\n temp['STATUS']='使用中'\n temp['SYSNAME']='网络管理SNMP'\n temp['OPERATOR']='李至恒'\n temp['NAME'] = dataDict[row]['relativeUniqueID']\n temp['RELATIVEUNIQUEID'] = dataDict[row]['relativeUniqueID']\n temp['LOCALSYSNAME'] = sysName\n temp['LOCALINTERFACEID'] = dataDict[row]['localInterfaceID']\n 
temp['LOCALINTERFACEIP'] = dataDict[row]['localInterfaceIP']\n            temp['PEERSYSNAME'] = dataDict[row]['peerSysName']\n            temp['PEERINTERFACEIP'] = dataDict[row]['peerInterfaceIP']\n            temp['PEERINTPHYSADDRESS'] = dataDict[row]['peerInterfacePhysAddress']\n            temp['PEERSYSFORWADING'] = dataDict[row]['peerSysForwarding']\n            dataList.append(temp)\n        return dataList\n\n    @log.classFuncDetail2Log('DEBUG')\n    def AllInformationPOST(self,dataDict):\n        sysName =dataDict['equipmentSysInformation']['sysName']\n        dataList = []\n        dataList = self.RouterSysInformationPOST(dataDict['equipmentSysInformation'],dataList)\n        dataList = self.InterfaceInformationPOST(dataDict['equipmentInterfaceInformation'],dataList,sysName)\n        dataList = self.RelationshipInformationPOST(dataDict['equipment2equipment'],dataList,sysName)\n        dataList = self.RouterSysInformationPOST(dataDict['equipmentSysInformation'],dataList)\n        dataList = self.RouterInformationPOST(dataDict['equipmentRouterInformation'],dataList,sysName)\n        log = log_model.OperationLog()\n        data = json.dumps(dataList, ensure_ascii=False, indent=4)\n        url = 'http://10.47.174.136:9018/bomc-resource-api/cmdb/data/batchSaveOrUpdate'\n        headers = {'content-type': 'application/json'}\n        response = requests.post(url, data=data.encode(\"utf-8\"), headers=headers)\n        return 0\n\ndef main():\n    pass\n\nif __name__ == '__main__':\n    main()\n","repo_name":"coderlzh/snmpProject","sub_path":"utils/cmdb_model.py","file_name":"cmdb_model.py","file_ext":"py","file_size_in_byte":10175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"7956410647","text":"from matrix_client.api import MatrixHttpApi\nimport os\nimport sys\nimport yaml\n\nsettings = None\ntry:\n with open('settings.yaml', 'r') as stream:\n settings = yaml.load(stream)\nexcept IOError as e:\n sys.exit(\"no settings.yaml found!\")\n\n# CONSTANTS\nDOMAIN = settings['DOMAIN']\nSERVICES = settings['SERVICES']\nROOMS = settings['ROOMS']\nTOKEN = settings['TOKEN']\n\n\ndef online( service ):\n stat = os.system('systemctl is-active --quiet {}'.format(service))\n if not stat:\n return True\n return False\n\n\nmatrix = MatrixHttpApi(DOMAIN, token=TOKEN)\nstatus = False\nfor service in SERVICES:\n if not online(service):\n status = True\n message = \"Service: '{}' is offline\".format(service)\n print(message)\n for room in ROOMS:\n matrix.join_room(room)\n response = matrix.send_message(room, message)\nif not status:\n print(\"everything is fine\")\n","repo_name":"zeratax/matrix-status-bot","sub_path":"status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"970555970","text":"import tensorflow as tf\nfrom IPython.display import clear_output\nimport matplotlib.pyplot as plt\n\nclass TFRecordsReader2D():\n '''Reads tfrecords and batches it if required.\n \n Returns:\n tf.dataset: Batched tf dataset.\n '''\n def __init__(self, dataset, buffer_size, skip_slices, batch_size, image_height, image_width, image_height_resize, \n image_width_resize, num_classes):\n self.dataset = dataset\n self.buffer_size = buffer_size\n self.skip_slices = skip_slices\n self.batch_size = batch_size\n self.image_height = image_height\n self.image_width = image_width\n self.image_height_resize = image_height_resize\n self.image_width_resize = image_width_resize\n self.num_classes = num_classes\n \n @tf.function\n def load_image_train(self, feature):\n '''Decodes the raw tf data to floats for images and segmentations, \n it is therefore only usable for training, not testing purposes.\n \n Args:\n feature (tf.dtype): Example feature.\n \n Returns:\n float, float: image, segmentation\n '''\n # Decode the raw bytes to floats\n image = tf.io.decode_raw(feature['data/slice'], tf.float32)\n seg = tf.io.decode_raw(feature['data/seg'], tf.float32)\n\n # Reshape floats to original image sizes\n image = tf.reshape(image, shape=[self.image_height, self.image_width, 1])\n seg = tf.reshape(seg, shape=[self.image_height, self.image_width, 1])\n\n # Flip by 90 degrees\n image = tf.image.rot90(image, k=3, name=None)\n seg = tf.image.rot90(seg, k=3, name=None)\n\n # Resize images to fit input of Unet\n image = tf.image.resize(image, (self.image_height_resize, self.image_width_resize))\n seg = tf.image.resize(seg, (self.image_height_resize, self.image_width_resize))\n\n # One hot encoding\n #seg = tf.cast(seg, tf.int32)\n #seg = tf.squeeze(seg, axis=[2])\n #seg = tf.one_hot(seg, depth=self.num_classes)\n return image, seg\n\n def display(self, display_list):\n '''Displays a list of images.\n \n Args:\n display_list (array): Array of image tensors to display.\n '''\n plt.figure(figsize=(15, 15))\n\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n\n for i in range(len(display_list)):\n plt.subplot(1, len(display_list), i+1)\n plt.title(title[i])\n plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i], data_format='channels_last'))\n plt.axis('off')\n plt.show()\n \n def _parse_function(self, example_proto, features):\n '''Parse the input tf.Example proto using the dictionary above.\n \n Args:\n example_proto (tf.dtype): Input proto.\n features (tf.dtype): Features dictionary.\n \n Returns:\n tf.dtype: Parsed single example.\n '''\n feature = tf.io.parse_single_example(example_proto, features)\n return feature\n\n def read(self):\n features = {\n 'data/slice': tf.io.FixedLenFeature([], tf.string),\n 'data/seg': tf.io.FixedLenFeature([], tf.string),\n }\n \n parsed_dataset = self.dataset.map(lambda x: self._parse_function(x, features))\n dataset = parsed_dataset.map(self.load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n #test = parsed_dataset.map(self.load_image_test)\n\n # While validating (buffer_size is zero): data is not shuffled and not skipped\n if self.buffer_size==0:\n dataset_batched = dataset.batch(self.batch_size).repeat()\n # While training: data is shuffled\n elif self.skip_slices==0:\n dataset_batched = dataset.shuffle(self.buffer_size).batch(self.batch_size).repeat()\n # While training: slices may be skipped\n elif self.skip_slices>0:\n dataset_batched = 
dataset.skip(self.skip_slices).shuffle(self.buffer_size).batch(self.batch_size).repeat()\n else:\n raise ValueError(\"Skip_slices has invalid value.\")\n\n # Prefetch elements from the input dataset ahead of the time they are requested\n dataset_batched_prefetched = dataset_batched.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n \n #test_dataset = test.batch(self.batch_size)\n \n # Display sample images\n #for image, mask in train.take(1):\n # sample_image, sample_mask = image, mask\n # self.display([sample_image, sample_mask])\n\n return dataset_batched_prefetched\n","repo_name":"jbi35/seg_training","sub_path":"training/tfrecord_provider_2d.py","file_name":"tfrecord_provider_2d.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
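A minimal usage sketch for the TFRecordsReader2D class in the record above. The TFRecords file name, image sizes, and batch settings are placeholder assumptions, not values taken from the original repository; only the constructor signature and the read() call are taken from the record itself.

import tensorflow as tf

# hypothetical input file -- replace with your own TFRecords path
raw_dataset = tf.data.TFRecordDataset("volumes_train.tfrecords")

reader = TFRecordsReader2D(
    dataset=raw_dataset,
    buffer_size=512,            # > 0 with skip_slices=0 -> shuffled training batches
    skip_slices=0,
    batch_size=8,
    image_height=512, image_width=512,
    image_height_resize=256, image_width_resize=256,
    num_classes=4,
)
train_batches = reader.read()   # batched, repeated, prefetched tf.data pipeline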
+{"seq_id":"6791578882","text":"# NIM/Nama : 16518360/Ilham Syahid S\n# Tanggal : 9 September 2018\n# Deskripsi : Warung Indomie\n\nbayar = 0\n\nprint (\"Menu makanan:\") \nprint (\"1. Indomie Single\") \nprint (\"2. Indomie Double\") \nprint (\"3. Indomie Telor\") \nmakanan = int(input(\"Masukkan nomor menu makanan: \"))\n\nprint (\"Menu minuman:\") \nprint (\"1. Air Putih\") \nprint (\"2. Teh Manis\") \nprint (\"3. Kopi\") \nminuman = int(input(\"Masukkan nomor menu minuman: \"))\n\nif (makanan == 1):\n\tbayar += 4000\nelif (makanan == 2):\n\tbayar += 8000\nelif (makanan == 3):\n\tbayar += 7000\n\nif (minuman == 1):\n\tbayar += 0\nelif (minuman == 2):\n\tbayar += 2000\nelif (minuman == 3):\n\tbayar += 4000\n\t\nprint (\"Biaya yang harus dibayarkan: \" + str(bayar))\n","repo_name":"ilhamsyahids/PTI","sub_path":"Praktikum 1/PR/H01-16518360-01.py","file_name":"H01-16518360-01.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4524683540","text":"# $Id: best.py 7 2010-01-13 12:16:47Z mark $\r\n#\r\n# Best\r\n#\r\n# Very simple best move picker based only on static evaluation\r\n# Plan is to have a family of best move pickers. This is preliminiary\r\n# to doing a tree search version.\r\n#\r\n\r\nimport move, static, random\r\n\r\ndef forWhite(mvs):\r\n \"random move best for white paired with static score\"\r\n assert len(mvs)>0\r\n max_score = -99999\r\n for mv in mvs:\r\n move.make(mv)\r\n score = static.eval()\r\n if score > max_score:\r\n best,max_score = [mv],score\r\n elif score == max_score:\r\n best.append(mv)\r\n move.umake(mv)\r\n return (random.choice(best),score)\r\n\r\ndef forBlack(mvs):\r\n \"random move best for black paired with static score\"\r\n assert len(mvs)>0\r\n min_score = 99999\r\n for mv in mvs:\r\n move.make(mv)\r\n score = static.eval()\r\n if score < min_score:\r\n best,min_score = [mv],score\r\n elif score == min_score:\r\n best.append(mv)\r\n move.umake(mv)\r\n return (random.choice(best),score)\r\n\r\n","repo_name":"markcornwell/pychess","sub_path":"best.py","file_name":"best.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23788271187","text":"# © SleepECG developers\n#\n# License: BSD (3-clause)\n\n\"\"\"Functions related to classifier training and evaluation.\"\"\"\n\nfrom __future__ import annotations\n\nimport shutil\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\nfrom typing import Any, Optional, Protocol\nfrom zipfile import ZipFile\n\nimport numpy as np\nimport yaml\n\nfrom sleepecg.config import get_config\nfrom sleepecg.feature_extraction import extract_features\nfrom sleepecg.io.sleep_readers import SleepRecord, SleepStage\nfrom sleepecg.utils import _STAGE_NAMES, _merge_sleep_stages\n\n\ndef prepare_data_keras(\n features: list[np.ndarray],\n stages: list[np.ndarray],\n stages_mode: str,\n mask_value: int = -1,\n) -> tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Mask and pad data and calculate sample weights for a Keras model.\n\n The following steps are performed:\n\n - Merge sleep stages in `stages` according to `stage_mode`.\n - Set features corresponding to `SleepStage.UNDEFINED` to `mask_value`.\n - Replace `np.nan` and `np.inf` in `features` with `mask_value`.\n - Pad to a common length, where `mask_value` is used for `features` and\n `SleepStage.UNDEFINED` (i.e `0`) is used for stages.\n - One-hot encode stages.\n - Calculate sample weights with class weights taken as `n_samples /\n (n_classes * np.bincount(y))`.\n\n Parameters\n ----------\n features : list[np.ndarray]\n Each 2D array in this list is a feature matrix of shape `(n_samples, n_features)`\n corresponding to a single record as returned by `extract_features()`.\n stages : list[np.ndarray]\n Each 1D array in this list contains the sleep stages of a single record as returned\n by `extract_features()`.\n stages_mode : str\n Identifier of the grouping mode. 
Can be any of `'wake-sleep'`, `'wake-rem-nrem'`,\n `'wake-rem-light-n3'`, `'wake-rem-n1-n2-n3'`.\n mask_value : int, optional\n Value used to pad features and replace `np.nan` and `np.inf`, by default `-1`.\n Remember to pass the same value to `layers.Masking` in your model.\n\n Returns\n -------\n features_padded : np.ndarray\n A 3D array of shape `(n_records, max_n_samples, n_features)`, where `n_records` is\n the length of `features`/`stages` and `max_n_samples` is the maximum number of rows\n of all feature matrices in `features`.\n stages_padded_onehot : np.ndarray\n A 3D array of shape `(n_records, max_n_samples, n_classes+1)`, where `n_classes` is\n the number of classes remaining after merging sleep stages (excluding\n `SleepStage.UNDEFINED`).\n sample_weight : np.ndarray\n A 2D array of shape `(n_records, max_n_samples)`.\n \"\"\"\n from tensorflow.keras.preprocessing.sequence import pad_sequences\n from tensorflow.keras.utils import to_categorical\n\n stages_merged = _merge_sleep_stages(stages, stages_mode)\n stages_padded = pad_sequences(stages_merged, value=SleepStage.UNDEFINED)\n stages_padded_onehot = to_categorical(stages_padded)\n\n features_padded = pad_sequences(features, dtype=float, value=mask_value)\n features_padded[stages_padded == SleepStage.UNDEFINED, :] = mask_value\n features_padded[~np.isfinite(features_padded)] = mask_value\n\n stage_counts = stages_padded_onehot.sum(0).sum(0)\n # samples corresponding to SleepStage.UNDEFINED are ignored, so their count shouldn't\n # influence the class weights -> slice with [1:]\n class_weight = np.sum(stage_counts[1:]) / stage_counts\n sample_weight = class_weight[stages_padded]\n\n return features_padded, stages_padded_onehot, sample_weight\n\n\ndef print_class_balance(stages: np.ndarray, stages_mode: Optional[str] = None) -> None:\n \"\"\"\n Print the number of samples and percentages of each class in `stages`.\n\n Parameters\n ----------\n stages : np.ndarray\n A 2D array of shape `(n_records, n_samples)` containing integer class labels or a\n 3D array of shape `(n_records, n_samples, n_classes)` containing one-hot encoded\n class labels.\n stages_mode : str, optional\n Identifier of the grouping mode. Can be any of `'wake-sleep'`, `'wake-rem-nrem'`,\n `'wake-rem-light-n3'`, `'wake-rem-n1-n2-n3'`. If `None` (default), no class labels\n are printed.\n \"\"\"\n if stages.ndim == 3:\n stages = stages.argmax(2)\n\n if stages_mode is not None:\n stage_names = [\"UNDEFINED\"] + _STAGE_NAMES[stages_mode]\n else:\n stage_names = [str(n) for n in range(6)]\n\n print(\"Class balance:\")\n\n unique_stages, counts = np.unique(stages, return_counts=True)\n max_len_counts = len(str(max(counts)))\n max_len_stages = max(len(str(s)) for s in stage_names)\n total_count = counts.sum()\n for stage, count, fraction in zip(unique_stages, counts, counts / total_count):\n print(\n f\" {stage_names[stage]:>{max_len_stages}}: {count:{max_len_counts}} \"\n f\"({fraction:3.0%})\"\n )\n\n\ndef save_classifier(\n name: str,\n model: Any,\n stages_mode: str,\n feature_extraction_params: dict[str, Any],\n mask_value: Optional[int] = None,\n classifiers_dir: Optional[str | Path] = None,\n) -> None:\n \"\"\"\n Save a trained classifier to disk.\n\n The `model` itself and a `.yml` file containing classifier metadata are stored as\n `.zip` in `classifiers_dir`. Model serialization is performed as suggested by the\n respective package documentation. 
Currently only Keras models are supported.\n\n Parameters\n ----------\n name : str\n An identifier which is used as the filename.\n model : Any\n The classification model, should have `fit()` and `predict()` methods.\n stages_mode : str\n Identifier of the grouping mode. Can be any of `'wake-sleep'`, `'wake-rem-nrem'`,\n `'wake-rem-light-n3'`, or `'wake-rem-n1-n2-n3'`.\n feature_extraction_params : dict[str, typing.Any]\n The parameters passed to `extract_features()`, as a dictionary mapping string\n parameter names to values. Should not include `records` and `n_jobs`.\n mask_value : int, optional\n Only required for Keras models, as passed to `prepare_data_keras()` and\n `keras.layers.Masking`, by default `None`.\n classifiers_dir : str | pathlib.Path, optional\n Directory in which the `.zip` file is stored. If `None` (default), the value is\n taken from the configuration.\n\n See Also\n --------\n load_classifier : Load classifiers saved with this function.\n \"\"\"\n if classifiers_dir is None:\n classifiers_dir = get_config(\"classifiers_dir\")\n\n target_file = Path(classifiers_dir).expanduser() / name\n\n model_type = model.__module__.split(\".\")[0]\n classifier_info = {\n \"model_type\": model_type,\n \"stages_mode\": stages_mode,\n \"feature_extraction_params\": feature_extraction_params,\n }\n if mask_value is not None:\n classifier_info[\"mask_value\"] = mask_value\n\n with TemporaryDirectory() as tmpdir:\n with open(f\"{tmpdir}/info.yml\", \"w\") as infofile:\n yaml.dump(classifier_info, infofile)\n\n if model_type == \"keras\":\n model.save(f\"{tmpdir}/classifier\")\n else:\n raise ValueError(f\"Saving model of type {type(model)} is not supported\")\n\n shutil.make_archive(str(target_file), \"zip\", tmpdir)\n\n\nclass _Model(Protocol):\n def fit(self, X: np.ndarray, y: np.ndarray) -> None:\n ...\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n ...\n\n\n@dataclass\nclass SleepClassifier:\n \"\"\"\n Store a sleep classifier model and metadata.\n\n Attributes\n ----------\n model : _Model\n The classification model, should have `fit` and `predict` methods.\n stages_mode : str\n Identifier of the grouping mode. Can be any of `'wake-sleep'`, `'wake-rem-nrem'`,\n `'wake-rem-light-n3'`, or `'wake-rem-n1-n2-n3'`.\n feature_extraction_params : dict[str, typing.Any]\n The parameters passed to `extract_features()`, as a dictionary mapping string\n parameter names to values. Does not include `records` and `n_jobs`.\n model_type : str\n A string identifying the model type, e.g. `'keras'` or `'sklearn'`. 
This is used by\n `stage()` to determine how to perform sleep stage predictions.\n mask_value : int, optional\n Only required for models of type `'keras'`, as passed to `prepare_data_keras()` and\n `keras.layers.Masking`, by default `None`.\n source_file : pathlib.Path, optional\n The file from which the classifier was loaded using `load_classifier()`, by default\n `None`.\n \"\"\"\n\n model: _Model\n stages_mode: str\n feature_extraction_params: dict[str, Any]\n model_type: str\n mask_value: Optional[int] = None\n source_file: Optional[Path] = None\n\n def __repr__(self) -> str:\n if self.source_file is not None:\n return (\n f\"\"\n )\n return f\"\"\n\n def __str__(self) -> str:\n features = \", \".join(self.feature_extraction_params[\"feature_selection\"])\n return (\n f\"SleepClassifier for {self.stages_mode.upper()}\\n\"\n f\" features: {features}\\n\"\n f\" model type: {self.model_type}\\n\"\n f\" source file: {self.source_file}\\n\"\n )\n\n\ndef load_classifier(\n name: str,\n classifiers_dir: Optional[str | Path] = None,\n silence_tf_messages: bool = True,\n) -> SleepClassifier:\n \"\"\"\n Load a `SleepClassifier` from disk.\n\n This functions reads `.zip` files saved by `save_classifier()`. Pass `'SleepECG'` as the\n second argument to load a classifier bundled with SleepECG.\n\n Parameters\n ----------\n name : str\n The identifier of the classifier to load.\n classifiers_dir : str | pathlib.Path, optional\n Directory in which to look for `.zip`. If `None` (default), the value is taken\n from the configuration. If `'SleepECG'`, load classifiers from\n `site-packages/sleepecg/classifiers`.\n silence_tf_messages : bool, optional\n Whether or not to silence messages from TensorFlow when loading a model. By default\n `True`.\n\n Returns\n -------\n SleepClassifier\n Contains the model and metadata required for feature extraction and preprocessing.\n Can be passed to `stage()`.\n\n See Also\n --------\n list_classifiers : Show information about available classifiers.\n \"\"\"\n if classifiers_dir == \"SleepECG\":\n classifiers_dir = Path(__file__).parent / \"classifiers\"\n elif classifiers_dir is None:\n classifiers_dir = get_config(\"classifiers_dir\")\n\n soure_file = Path(classifiers_dir).expanduser() / f\"{name}.zip\"\n\n with TemporaryDirectory() as tmpdir:\n shutil.unpack_archive(soure_file, tmpdir)\n\n with open(f\"{tmpdir}/info.yml\") as infofile:\n classifier_info = yaml.safe_load(infofile)\n\n if classifier_info[\"model_type\"] == \"keras\":\n import os\n\n environ_orig = os.environ.copy()\n if silence_tf_messages:\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n from tensorflow import keras\n\n try:\n classifier = keras.models.load_model(f\"{tmpdir}/classifier\")\n finally:\n os.environ.clear()\n os.environ.update(environ_orig)\n\n else:\n raise ValueError(\n f\"Loading model of type {classifier_info['model_type']} is not supported\"\n )\n\n return SleepClassifier(\n model=classifier,\n source_file=soure_file,\n **classifier_info,\n )\n\n\ndef list_classifiers(classifiers_dir: Optional[str | Path] = None) -> None:\n \"\"\"\n List available classifiers.\n\n Parameters\n ----------\n classifiers_dir : str | pathlib.Path, optional\n Directory in which to look for classifiers. If `None` (default), the value is taken\n from the configuration. 
If `'SleepECG'`, `site-packages/sleepecg/classifiers` is\n used.\n\n See Also\n --------\n load_classifier : Load classifiers.\n \"\"\"\n if classifiers_dir == \"SleepECG\":\n classifiers_dir = Path(__file__).parent / \"classifiers\"\n print(\"Classifiers in SleepECG:\")\n elif classifiers_dir is None:\n classifiers_dir = get_config(\"classifiers_dir\")\n print(f\"Classifiers in {classifiers_dir}:\")\n else:\n print(f\"Classifiers in {classifiers_dir}:\")\n\n classifiers_dir = Path(classifiers_dir).expanduser()\n\n for classifier_filepath in classifiers_dir.glob(\"*.zip\"):\n with ZipFile(classifier_filepath, \"r\") as zip_file:\n with zip_file.open(\"info.yml\") as infofile:\n classifier_info = yaml.safe_load(infofile)\n features = \", \".join(\n classifier_info[\"feature_extraction_params\"][\"feature_selection\"]\n )\n print(\n f\" {classifier_filepath.stem}\\n\"\n f\" stages_mode: {classifier_info['stages_mode'].upper()}\\n\"\n f\" model type: {classifier_info['model_type']}\\n\"\n f\" features: {features}\\n\"\n )\n\n\ndef _confusion_matrix(y_true: np.ndarray, y_pred: np.ndarray, N: int) -> np.ndarray:\n \"\"\"\n Compute confusion matrix.\n\n Parameters\n ----------\n y_true : np.ndarray\n Ground truth (correct) target values.\n y_pred : np.ndarray\n Estimated targets as returned by a classifier.\n N : int\n Number of unique classes.\n\n Returns\n -------\n np.ndarray\n Confusion matrix whose i-th row and j-th column entry indicates the number of\n samples with true label being i-th class and predicted label being j-th class.\n \"\"\"\n return np.bincount(N * y_true + y_pred, minlength=N * N).reshape(N, N)\n\n\ndef _cohen_kappa(confmat: np.ndarray) -> float:\n \"\"\"\n Compute Cohen's kappa (which measures inter-annotator agreement).\n\n Implementation modified from `sklearn.metrics.cohen_kappa_score`.\n\n Parameters\n ----------\n confmat : np.ndarray\n A confusion matrix, as returned by `confusion_matrix()`.\n\n Returns\n -------\n float\n The kappa statistic, which is a number between -1 and 1. The maximum value means\n complete agreement; zero or lower means chance agreement.\n \"\"\"\n n_classes = confmat.shape[0]\n sum0 = np.sum(confmat, axis=0)\n sum1 = np.sum(confmat, axis=1)\n expected = np.outer(sum0, sum1) / np.sum(sum0)\n w_mat = 1 - np.eye(n_classes, dtype=int)\n k: float = np.sum(w_mat * confmat) / np.sum(w_mat * expected)\n return 1 - k\n\n\ndef evaluate(\n stages_true: np.ndarray,\n stages_pred: np.ndarray,\n stages_mode: str,\n show_undefined: bool = False,\n) -> tuple[np.ndarray, list[str]]:\n \"\"\"\n Evaluate the performance of a sleep stage classifier.\n\n Prints overall accuracy, Cohen's kappa, confusion matrix, per-class precision, recall,\n and F1 score.\n\n Parameters\n ----------\n stages_true : np.ndarray\n The annotated (ground truth) sleep stages as a 2D array of shape\n `(n_records, n_samples)` containing integer class labels, or a 3D array of shape\n `(n_records, n_samples, n_classes)` containing one-hot encoded class labels.\n stages_pred : np.ndarray\n The predicted sleep stages as a 2D array of shape `(n_records, n_samples)`\n containing integer class labels, or a 3D array of shape\n `(n_records, n_samples, n_classes)` containing one-hot encoded class labels.\n stages_mode : str\n Identifier of the grouping mode. 
Can be any of `'wake-sleep'`, `'wake-rem-nrem'`,\n `'wake-rem-light-n3'`, `'wake-rem-n1-n2-n3'`.\n show_undefined : bool, optional\n If `True`, include `SleepStage.UNDEFINED` (i.e `0`) in the confusion matrix output.\n This can be helpful during debugging. By default `False`.\n\n Returns\n -------\n conf_mat : np.ndarray\n Confusion matrix.\n stage_names : list[str]\n Sleep stage names.\n \"\"\"\n stage_names = _STAGE_NAMES[stages_mode]\n\n if stages_true.ndim == 3:\n stages_true = stages_true.argmax(2)\n if stages_pred.ndim == 3:\n stages_pred = stages_pred.argmax(2)\n\n confmat_full = _confusion_matrix(\n stages_true.ravel(),\n stages_pred.ravel(),\n len(stage_names) + 1,\n )\n confmat = confmat_full[1:, 1:]\n\n print(f\"Confusion matrix ({stages_mode.upper()}):\")\n if show_undefined:\n print(confmat_full)\n else:\n print(confmat)\n\n kappa = _cohen_kappa(confmat)\n\n acc = confmat.trace() / confmat.sum()\n tp = np.diag(confmat)\n fp = confmat.sum(1) - tp\n fn = confmat.sum(0) - tp\n precision = tp / (tp + fn)\n recall = tp / (tp + fp)\n f1 = 2 / (recall**-1 + precision**-1)\n support = confmat.sum(1)\n\n print(f\"Accuracy: {acc:.4f}\")\n print(f\"Cohen's kappa: {kappa:.4f}\")\n print(\" precision recall f1-score support\")\n for i, stage_name in enumerate(stage_names):\n print(\n f\"{stage_name:>5}{precision[i]:11.2f}{recall[i]:10.2f}{f1[i]:10.2f}\"\n f\"{support[i]:11}\"\n )\n print(f\"{support.sum():47}\")\n\n return confmat_full, stage_names\n\n\ndef stage(\n clf: SleepClassifier,\n record: SleepRecord,\n return_mode: str = \"int\",\n) -> np.ndarray:\n \"\"\"\n Predict sleep stages for a single record.\n\n Feature extraction and preprocessing are performed according to the information stored\n in `clf`.\n\n Parameters\n ----------\n clf : SleepClassifier\n A classifier object as loaded with `load_classifier()`.\n record : SleepRecord\n A single record (i.e. night).\n return_mode : str, optional\n If `'int'`, return the predicted sleep stages as a 1D array of integers. If\n `'prob'`, return a 2D array of probabilities. If `'str'`, return a 1D array of\n strings.\n\n Returns\n -------\n np.ndarray\n Array of sleep stages. Depending on `return_mode`, this takes different forms.\n\n Warnings\n --------\n Note that the returned labels depend on `clf.stages_mode`, so they do not necessarily\n follow the stage-to-integer mapping defined in `SleepStage`. See\n [classification](../classification.md) for details.\n \"\"\"\n return_modes = {\"int\", \"prob\", \"str\"}\n if return_mode not in return_modes:\n raise ValueError(\n f\"Invalid return_mode: {return_mode!r}. Possible options: {return_modes}\"\n )\n\n stage_names = [\"UNDEFINED\"] + _STAGE_NAMES[clf.stages_mode]\n\n features = extract_features(records=[record], **clf.feature_extraction_params)[0][0]\n if clf.model_type == \"keras\":\n features[~np.isfinite(features)] = clf.mask_value\n stages_pred_proba: np.ndarray = clf.model.predict(features[np.newaxis, ...])[0]\n stages_pred: np.ndarray = stages_pred_proba.argmax(-1)\n else:\n raise ValueError(f\"Staging with model of type {type(clf)} is not supported\")\n\n if return_mode == \"prob\":\n return stages_pred_proba\n elif return_mode == \"str\":\n return np.array([stage_names[s] for s in stages_pred])\n return stages_pred\n","repo_name":"cbrnr/sleepecg","sub_path":"sleepecg/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":19356,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"53"}
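A small self-contained check of the bincount confusion-matrix trick and the kappa value used by _confusion_matrix and _cohen_kappa in the classification.py record above. The toy labels are invented for illustration; the expected numbers in the comments are worked out by hand with the standard kappa formula, which agrees with the helper above.

import numpy as np

y_true = np.array([0, 0, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 1, 2, 0])
N = 3

# same trick as _confusion_matrix: each (true, pred) pair maps to bucket true*N + pred
cm = np.bincount(N * y_true + y_pred, minlength=N * N).reshape(N, N)
# rows are true labels, columns are predictions:
# [[1 1 0]
#  [0 2 0]
#  [1 0 1]]

po = np.trace(cm) / cm.sum()                        # observed agreement = 4/6
pe = (cm.sum(0) * cm.sum(1)).sum() / cm.sum() ** 2  # chance agreement = 1/3
kappa = (po - pe) / (1 - pe)                        # 0.5, matches _cohen_kappa(cm)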
+{"seq_id":"18093640682","text":"# trova quante volte una sequenza di caratteri appare in un altra sequenza\r\n\r\n\r\n\r\n\r\n\r\nb = ['c','d', 'c', 'a', 'b', 'a', 'b', 'a', 'c', 'a', 'b', 'a', 'b', 'c', 'd', 'b', 'a', 'b', 'b', 'a','b', 'a', 'a', 'b']\r\na = ['a','b','a']\r\n\r\nprint(occorrenza_sequenza(a,b))","repo_name":"FrancescoRomeo02/UniversityNotes","sub_path":"LT_21-22/SEM2/ASD/Exercises/Iteratives/occorrenza.py","file_name":"occorrenza.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"it","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"10147334945","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Introduction\n# \n# I believe the main issue we have in this challenge is not to predict revenues but more to get these zeros right since less than 1.3 % of the sessions have a non-zero revenue.\n# \n# The idea in this kernel is to classify non-zero transactions first and use that to help our regressor get better results.\n# \n# The kernel only presents one way of doing it. No special feature engineering or set of hyperparameters, just a code shell/structure ;-) \n\n# ### Check file structure\n\n# In[ ]:\n\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# ### Import packages\n\n# In[ ]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.cbook as cbook\nimport seaborn as sns\nfrom sklearn.metrics import mean_squared_error, roc_auc_score, log_loss\nimport gc\nimport time\nfrom pandas.core.common import SettingWithCopyWarning\nimport warnings\nimport lightgbm as lgb\nfrom sklearn.model_selection import KFold, GroupKFold\n\nwarnings.simplefilter('error', SettingWithCopyWarning)\ngc.enable()\n\n# ### Get data\n\n# In[ ]:\n\n\ntrain = pd.read_csv('../input/create-extracted-json-fields-dataset/extracted_fields_train.gz', \n dtype={'date': str, 'fullVisitorId': str, 'sessionId':str}, nrows=None)\ntest = pd.read_csv('../input/create-extracted-json-fields-dataset/extracted_fields_test.gz', \n dtype={'date': str, 'fullVisitorId': str, 'sessionId':str}, nrows=None)\ntrain.shape, test.shape\n\n# ### Get targets\n\n# In[ ]:\n\n\ny_clf = (train['totals.transactionRevenue'].fillna(0) > 0).astype(np.uint8)\ny_reg = train['totals.transactionRevenue'].fillna(0)\ndel train['totals.transactionRevenue']\ny_clf.mean(), y_reg.mean()\n\n# ### Add date features\n\n# In[ ]:\n\n\nfor df in [train, test]:\n df['date'] = pd.to_datetime(df['date'])\n df['vis_date'] = pd.to_datetime(df['visitStartTime'])\n df['sess_date_dow'] = df['vis_date'].dt.dayofweek\n df['sess_date_hours'] = df['vis_date'].dt.hour\n df['sess_date_dom'] = df['vis_date'].dt.day\n\n# ### Create list of features\n\n# In[ ]:\n\n\nexcluded_features = [\n 'date', 'fullVisitorId', 'sessionId', 'totals.transactionRevenue', \n 'visitId', 'visitStartTime', 'non_zero_proba', 'vis_date'\n]\n\ncategorical_features = [\n _f for _f in train.columns\n if (_f not in excluded_features) & (train[_f].dtype == 'object')\n]\n\nif 'totals.transactionRevenue' in train.columns:\n del train['totals.transactionRevenue']\n\nif 'totals.transactionRevenue' in test.columns:\n del test['totals.transactionRevenue']\n\n# ### Factorize categoricals\n\n# In[ ]:\n\n\nfor f in categorical_features:\n train[f], indexer = pd.factorize(train[f])\n test[f] = indexer.get_indexer(test[f])\n\n# ### Classify non-zero revenues\n\n# In[ ]:\n\n\nfolds = GroupKFold(n_splits=5)\n\ntrain_features = [_f for _f in train.columns if _f not in excluded_features]\nprint(train_features)\noof_clf_preds = np.zeros(train.shape[0])\nsub_clf_preds = np.zeros(test.shape[0])\nfor fold_, (trn_, val_) in enumerate(folds.split(y_clf, y_clf, groups=train['fullVisitorId'])):\n trn_x, trn_y = train[train_features].iloc[trn_], y_clf.iloc[trn_]\n val_x, val_y = train[train_features].iloc[val_], y_clf.iloc[val_]\n \n clf = lgb.LGBMClassifier(\n num_leaves=31,\n learning_rate=0.03,\n n_estimators=1000,\n subsample=.9,\n colsample_bytree=.9,\n random_state=1\n )\n clf.fit(\n trn_x, trn_y,\n eval_set=[(val_x, val_y)],\n early_stopping_rounds=50,\n verbose=50\n )\n \n 
oof_clf_preds[val_] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)[:, 1]\n print(roc_auc_score(val_y, oof_clf_preds[val_]))\n sub_clf_preds += clf.predict_proba(test[train_features], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits\n \nroc_auc_score(y_clf, oof_clf_preds)\n\n# ### Add classification to dataset\n\n# In[ ]:\n\n\ntrain['non_zero_proba'] = oof_clf_preds\ntest['non_zero_proba'] = sub_clf_preds\n\n# ### Predict revenues at session level\n\n# In[ ]:\n\n\ntrain_features = [_f for _f in train.columns if _f not in excluded_features] + ['non_zero_proba']\nprint(train_features)\n\noof_reg_preds = np.zeros(train.shape[0])\nsub_reg_preds = np.zeros(test.shape[0])\nimportances = pd.DataFrame()\n\nfor fold_, (trn_, val_) in enumerate(folds.split(y_reg, y_reg, groups=train['fullVisitorId'])):\n trn_x, trn_y = train[train_features].iloc[trn_], y_reg.iloc[trn_].fillna(0)\n val_x, val_y = train[train_features].iloc[val_], y_reg.iloc[val_].fillna(0)\n \n reg = lgb.LGBMRegressor(\n num_leaves=31,\n learning_rate=0.03,\n n_estimators=1000,\n subsample=.9,\n colsample_bytree=.9,\n random_state=1\n )\n reg.fit(\n trn_x, np.log1p(trn_y),\n eval_set=[(val_x, np.log1p(val_y))],\n early_stopping_rounds=50,\n verbose=50\n )\n imp_df = pd.DataFrame()\n imp_df['feature'] = train_features\n imp_df['gain'] = reg.booster_.feature_importance(importance_type='gain')\n \n imp_df['fold'] = fold_ + 1\n importances = pd.concat([importances, imp_df], axis=0, sort=False)\n \n oof_reg_preds[val_] = reg.predict(val_x, num_iteration=reg.best_iteration_)\n oof_reg_preds[oof_reg_preds < 0] = 0\n _preds = reg.predict(test[train_features], num_iteration=reg.best_iteration_)\n _preds[_preds < 0] = 0\n sub_reg_preds += np.expm1(_preds) / folds.n_splits\n \nmean_squared_error(np.log1p(y_reg.fillna(0)), oof_reg_preds) ** .5\n\n# In[ ]:\n\n\nimport warnings\nwarnings.simplefilter('ignore', FutureWarning)\n\nimportances['gain_log'] = np.log1p(importances['gain'])\nmean_gain = importances[['gain', 'feature']].groupby('feature').mean()\nimportances['mean_gain'] = importances['feature'].map(mean_gain['gain'])\n\nplt.figure(figsize=(8, 12))\nsns.barplot(x='gain_log', y='feature', data=importances.sort_values('mean_gain', ascending=False))\n\n# ### Save predictions\n# \n# Maybe one day Kaggle will support file compression for submissions from kernels...\n# \n# I'm aware I sum the logs instead of summing the actual revenues...\n\n# In[ ]:\n\n\ntest['PredictedLogRevenue'] = sub_reg_preds\ntest[['fullVisitorId', 'PredictedLogRevenue']].groupby('fullVisitorId').sum()['PredictedLogRevenue'].apply(np.log1p).reset_index()\\\n .to_csv('test_clf_reg_log_of_sum.csv', index=False)\n\n# ### Plot Actual Dollar estimates per dates\n\n# In[ ]:\n\n\n# Go to actual revenues\ntrain['PredictedRevenue'] = np.expm1(oof_reg_preds)\ntest['PredictedRevenue'] = sub_reg_preds\ntrain['totals.transactionRevenue'] = y_reg\n\n# Sum by date on train and test\ntrn_group = train[['date', 'PredictedRevenue', 'totals.transactionRevenue']].groupby('date').sum().reset_index()\nsub_group = test[['date', 'PredictedRevenue']].groupby('date').sum().reset_index()\n\n# Now plot all this\nyears = mdates.YearLocator() # every year\nmonths = mdates.MonthLocator() # every month\nyearsFmt = mdates.DateFormatter('%Y-%m')\n\nfig, ax = plt.subplots(figsize=(15, 6))\nax.set_title('Actual Dollar Revenues - we are way off...', fontsize=15, fontweight='bold')\nax.plot(pd.to_datetime(trn_group['date']).values, 
trn_group['totals.transactionRevenue'].values)\nax.plot(pd.to_datetime(trn_group['date']).values, trn_group['PredictedRevenue'].values)\nax.plot(pd.to_datetime(sub_group['date']).values, sub_group['PredictedRevenue'].values)\n\n# # format the ticks\nax.xaxis.set_major_locator(months)\nax.xaxis.set_major_formatter(yearsFmt)\nax.xaxis.set_minor_locator(months)\n\nax.format_xdata = mdates.DateFormatter('%Y-%m-%d')\n# # ax.format_ydata = price\nax.grid(True)\n\n# rotates and right aligns the x labels, and moves the bottom of the\n# axes up to make room for them\nfig.autofmt_xdate()\n\n# ### Display using np.log1p\n\n# In[ ]:\n\n\n# Go to actual revenues\ntrain['PredictedRevenue'] = np.expm1(oof_reg_preds)\ntest['PredictedRevenue'] = sub_reg_preds\ntrain['totals.transactionRevenue'] = y_reg\n\n# Sum by date on train and test\ntrn_group = train[['date', 'PredictedRevenue', 'totals.transactionRevenue']].groupby('date').sum().reset_index()\nsub_group = test[['date', 'PredictedRevenue']].groupby('date').sum().reset_index()\n\nyears = mdates.YearLocator() # every year\nmonths = mdates.MonthLocator() # every month\nyearsFmt = mdates.DateFormatter('%Y-%m')\n\nfig, ax = plt.subplots(figsize=(15, 6))\nax.set_title('We are also off in logs... or am I just stupid ?', fontsize=15, fontweight='bold')\nax.plot(pd.to_datetime(trn_group['date']).values, np.log1p(trn_group['totals.transactionRevenue'].values))\nax.plot(pd.to_datetime(trn_group['date']).values, np.log1p(trn_group['PredictedRevenue'].values))\nax.plot(pd.to_datetime(sub_group['date']).values, np.log1p(sub_group['PredictedRevenue'].values))\n\n# # format the ticks\nax.xaxis.set_major_locator(months)\nax.xaxis.set_major_formatter(yearsFmt)\nax.xaxis.set_minor_locator(months)\n\nax.format_xdata = mdates.DateFormatter('%Y-%m-%d')\n# # ax.format_ydata = price\nax.grid(True)\n\n# rotates and right aligns the x labels, and moves the bottom of the\n# axes up to make room for them\nfig.autofmt_xdate()\n\n# ### Using sum of logs - no really ?\n\n# In[ ]:\n\n\n# Keep amounts in logs\ntrain['PredictedRevenue'] = oof_reg_preds\ntest['PredictedRevenue'] = np.log1p(sub_reg_preds)\ntrain['totals.transactionRevenue'] = np.log1p(y_reg)\n\n# You really mean summing up the logs ???\ntrn_group = train[['date', 'PredictedRevenue', 'totals.transactionRevenue']].groupby('date').sum().reset_index()\nsub_group = test[['date', 'PredictedRevenue']].groupby('date').sum().reset_index()\n\nyears = mdates.YearLocator() # every year\nmonths = mdates.MonthLocator() # every month\nyearsFmt = mdates.DateFormatter('%Y-%m')\n\nfig, ax = plt.subplots(figsize=(15, 6))\nax.set_title('Summing up logs looks a lot better !?! Is the challenge to find the correct metric ???', fontsize=15, fontweight='bold')\nax.plot(pd.to_datetime(trn_group['date']).values, trn_group['totals.transactionRevenue'].values)\nax.plot(pd.to_datetime(trn_group['date']).values, trn_group['PredictedRevenue'].values)\nax.plot(pd.to_datetime(sub_group['date']).values, sub_group['PredictedRevenue'].values)\n\n# # format the ticks\nax.xaxis.set_major_locator(months)\nax.xaxis.set_major_formatter(yearsFmt)\nax.xaxis.set_minor_locator(months)\n\nax.format_xdata = mdates.DateFormatter('%Y-%m-%d')\n# # ax.format_ydata = price\nax.grid(True)\n\n# rotates and right aligns the x labels, and moves the bottom of the\n# axes up to make room for them\nfig.autofmt_xdate()\n\n# The issue is that the model highly underestimates the log revenues. 
As a consequence, going back to actual dollar revenues underestimates even more transactions. Now if you sum up a high number of transactions (like we do to display things at date level) will get estimates really off. This is what the 1st plot demonstrates.\n# \n# The 2nd plot shows the same thing but in log. Again this is due to the number of transactions we need to aggregate at date level.\n# \n# Due to the underestimation, summing up log revenues (i.e. in fact multiplying them) will get revenues on the same range as actual revenues. This is just a proof of our underestimation at session level.\n# \n# As it's been said on the forum we don't have a lot of Visitors with lots of sessions and this is why Visitor RMSE and Session RMSE are on the same scale.\n# \n# \n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample54.py","file_name":"sample54.py","file_ext":"py","file_size_in_byte":11240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
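A quick numeric illustration of the log-of-sum vs. sum-of-logs point discussed at the end of the kernel above; the revenue figures are made up and are only meant to show how far apart the two aggregations sit for one visitor.

import numpy as np

# three sessions for one visitor; revenues in the dataset's micro-dollar units
revenues = np.array([2.0e6, 0.0, 5.0e5])

log_of_sum = np.log1p(revenues.sum())   # ~= 14.73, the per-visitor target the metric uses
sum_of_logs = np.log1p(revenues).sum()  # ~= 27.63, what summing session-level logs produces
# the two only look comparable when the session-level predictions are strongly underestimated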
+{"seq_id":"16780912608","text":"import pandas as pd\nfrom clearml import Dataset\nfrom configs.configs import DATASET_NAME, PROJECT_NAME\nfrom root import PROCESSED_DIR\nfrom src.utils.logger import get_logger\nfrom src.utils.task_utils import save_json\n\nlogger = get_logger(\"logs\", __name__)\n\n\ndef to_feature_store(data: pd.DataFrame, metadata: dict, parent_datasets_id: str):\n ds = Dataset.create(\n dataset_name=DATASET_NAME,\n dataset_project=PROJECT_NAME,\n dataset_version=metadata[\"feature_group_version\"],\n # use_current_task=True,\n parent_datasets=[parent_datasets_id],\n description=\"Denmark hourly energy consumption data. Data is uploaded with an 15 days delay.\",\n dataset_tags=[\"storage\", metadata[\"export_datetime_utc_start\"], metadata[\"export_datetime_utc_end\"]],\n )\n\n parent_dataset = Dataset.get(parent_datasets_id)\n local_path = parent_dataset.get_local_copy()\n\n if \"processed.csv\" in parent_dataset.list_files():\n df = pd.read_csv(f\"{local_path}/processed.csv\")\n df.drop(columns=[\"index\"], inplace=True)\n df[\"datetime_utc\"] = pd.to_datetime(df[\"datetime_utc\"])\n\n data_ = pd.concat([df, data], ignore_index=True)\n data_.sort_values(by=\"datetime_utc\", inplace=True, ascending=False)\n data_.drop_duplicates(keep=\"first\", inplace=True)\n\n data_.to_csv(PROCESSED_DIR / \"processed.csv\", index_label=[\"index\"])\n else:\n data_ = data.copy()\n data_.to_csv(PROCESSED_DIR / \"processed.csv\", index_label=[\"index\"])\n\n ds.add_files(path=PROCESSED_DIR / \"processed.csv\", verbose=True)\n\n metadata[\"feature_store_id\"] = ds.id\n save_json(metadata, PROCESSED_DIR / \"metadata.json\")\n ds.add_files(path=PROCESSED_DIR / \"metadata.json\", verbose=True)\n\n ds.upload(verbose=True)\n ds.finalize()\n\n return ds, metadata\n","repo_name":"quantran-13/cs2215-ECD-mlops","sub_path":"src/feature_pipeline/src/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14034080982","text":"L = [ x*x for x in range(1,10)]\nprint(L)\ng = ( x*x for x in range(1,10))\nfor n in g:\n print(n)\n\n\n# 斐波拉契数列\ndef fib(max):\n n,a,b = 0,0,1\n while n < max:\n # print(b)\n yield b\n a,b = b,a+b\n n = n + 1\n return 'done'\n\n# print(fib(6))\nfor n in fib(6):\n print(n)\ndef odd():\n print('step1')\n yield 1\n print('step2')\n yield(3)\n print('step3')\n yield(5)\n\no = odd()\nprint(next(o))\nprint(next(o))\nprint(next(o))\n\n\n","repo_name":"bubbyqi/python","sub_path":"生成器-斐波拉契数列.py","file_name":"生成器-斐波拉契数列.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26526171977","text":"from django.test import TestCase\n\nfrom pipeline.contrib.external_plugins.models import GIT, S3, FILE_SYSTEM, S3Source, FileSystemSource\n\nfrom gcloud.tests.external_plugins.mock import * # noqa\nfrom gcloud.tests.external_plugins.mock_settings import * # noqa\nfrom gcloud.external_plugins import exceptions\nfrom gcloud.external_plugins.models.cache import CachePackageSource\n\n\nclass TestCachePackageSource(TestCase):\n def setUp(self):\n self.CACHE_SOURCE_NAME = \"CACHE_S3_SOURCE\"\n self.UPDATE_CACHE_SOURCE_NAME = \"CACHE_FILE_SYSTEM_SOURCE\"\n self.SOURCE_TYPE = S3\n self.UPDATE_SOURCE_TYPE = FILE_SYSTEM\n self.SOURCE_PACKAGES = {\n \"root_package_1\": {\"version\": \"\", \"modules\": [\"test1\", \"test2\"]},\n \"root_package_2\": {\"version\": \"\", \"modules\": [\"test3\", \"test4\"]},\n \"root_package_3\": {\"version\": \"\", \"modules\": [\"test5\", \"test6\"]},\n }\n self.UPDATED_SOURCE_PACKAGES = {\n \"root_package_1\": {\"version\": \"\", \"modules\": [\"test1\", \"test2\"]},\n }\n self.SOURCE_KWARGS = {\n \"service_address\": \"service_address\",\n \"bucket\": \"bucket\",\n \"access_key\": \"access_key\",\n \"secret_key\": \"secret_key\",\n \"source_dir\": \"\",\n }\n self.UPDATED_SOURCE_KWARGS = {\"path\": \"/tmp\"}\n self.cache_source = CachePackageSource.objects.add_cache_source(\n name=self.CACHE_SOURCE_NAME,\n source_type=self.SOURCE_TYPE,\n packages=self.SOURCE_PACKAGES,\n **self.SOURCE_KWARGS\n )\n\n def tearDown(self):\n caches = CachePackageSource.objects.all()\n for cache in caches:\n cache.delete()\n\n def test_base_source(self):\n base_source = S3Source.objects.get(id=self.cache_source.base_source_id)\n self.assertEqual(self.cache_source.base_source, base_source)\n self.assertEqual(base_source.packages, self.SOURCE_PACKAGES)\n\n def test_get_base_source(self):\n self.assertEqual(CachePackageSource.objects.get_base_source(), self.cache_source.base_source)\n\n def test_add_cache_source__exception(self):\n self.assertRaises(\n exceptions.CacheSourceTypeError,\n CachePackageSource.objects.add_cache_source,\n name=self.CACHE_SOURCE_NAME,\n source_type=GIT,\n packages=self.SOURCE_PACKAGES,\n **self.SOURCE_KWARGS\n )\n self.assertRaises(\n exceptions.MultipleCacheSourceError,\n CachePackageSource.objects.add_cache_source,\n name=self.CACHE_SOURCE_NAME,\n source_type=self.SOURCE_TYPE,\n packages=self.SOURCE_PACKAGES,\n **self.SOURCE_KWARGS\n )\n\n def test_name(self):\n self.assertEqual(self.cache_source.name, self.CACHE_SOURCE_NAME)\n\n def test_packages(self):\n self.assertEqual(self.cache_source.packages, self.SOURCE_PACKAGES)\n\n def test_details(self):\n self.assertEqual(self.cache_source.details, self.SOURCE_KWARGS)\n\n def test_write__type_error(self):\n self.cache_source.type = \"error_type\"\n self.assertRaises(exceptions.CacheSourceTypeError, self.cache_source.write)\n\n @patch(GCLOUD_EXTERNAL_PLUGINS_MODELS_CACHE_WRITER_CLS_FACTORY, MockClsFactory())\n def test_write(self):\n self.assertIsNone(self.cache_source.write())\n\n def test_update_base_source(self):\n CachePackageSource.objects.update_base_source(\n package_source_id=self.cache_source.id,\n source_type=self.UPDATE_SOURCE_TYPE,\n packages=self.UPDATED_SOURCE_PACKAGES,\n **self.UPDATED_SOURCE_KWARGS\n )\n self.cache_source = CachePackageSource.objects.get(id=self.cache_source.id)\n self.assertEqual(self.cache_source.base_source.packages, self.UPDATED_SOURCE_PACKAGES)\n base_source = FileSystemSource.objects.get(id=self.cache_source.base_source_id)\n 
self.assertEqual(self.cache_source.base_source, base_source)\n\n def test_get_base_source__none(self):\n CachePackageSource.objects.get(id=self.cache_source.id).delete()\n self.assertEqual(CachePackageSource.objects.get_base_source(), None)\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"gcloud/tests/external_plugins/models/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"}
+{"seq_id":"20623174111","text":"import numpy as np\nfrom numpy.matlib import repmat\ndef initeCenter(k,dataRows,dataNumber,theMax,theMin):\n randomK = np.random.random(size=dataRows*k) #生成k个0-1随机数\n randomK=randomK.reshape((k,dataRows))\n # randomK=repmat(randomK, dataRows,1 ) #一个向量重复变成矩阵\n # randomK=randomK.T #矩阵转置,注意每一列是相同的\n center=(randomK*theMin)+theMin #生成初始聚类中心\n print(\"初始聚类中心为:\")\n print(center) #打印聚类中心\n return center\ndef findNewCenter(k,data,dataNumber,center):\n centerDist = np.ones((dataNumber, k))\n for i in range(0,k):\n dist=(data-center[i,:])**2\n dist=np.sum(dist,axis=1)\n #dist=np.power(dist,0.5)\n dist=np.sqrt(dist)\n centerDist[:,i]=dist\n marked=np.argmin(centerDist,axis=1)\n print(marked)\n oldCenter=np.array(center)\n for i in range(0, k):\n index=np.where(marked==i)\n newCenter=data[index,:]\n newCenter=np.mean(newCenter, axis=1)\n center[i,:]=newCenter\n return marked,center,oldCenter\ndef clustering(k,data,dataNumber,center,oldCenter,thresholtimes):\n #初始化\n thresholdError=0.01 #中心点迭代阈值\n realError=(center - oldCenter)\n realError=realError ** 2\n realError = np.sum(realError,axis=1 )\n realError = np.sqrt(realError)\n realError = sum(realError)\n runingTimes=1\n #循环\n while (realError>thresholdError and runingTimes 0, \"Chunksize should be a Positive Integer\"\n self._pull_chunksize = pull_chunksize\n else:\n warnings.warn(f\"`chunksize=None` assumes the operation transforms on the entire dataset at once\")\n self._pull_chunksize = self._size\n\n # If limiting documents, make sure pull chunk size is not larger than document count\n if self._limit_documents is not None and self._limit_documents < self._pull_chunksize:\n self._pull_chunksize = self.limit_documents\n\n self._output_to_status = output_to_status # Whether we should output_to_status\n self._output_documents = [] # document store for output\n\n # Empty unless documents passed into run on instead of dataset\n self._documents = DocumentList(documents)\n if len(self._documents) > 0:\n # Force output to status if running on documents\n self._output_to_status = True\n\n if operator is not None:\n self._operator = operator\n self._operators = [operator]\n else:\n self._operator = None\n self._operators = operators\n\n if filters is None:\n filters = []\n assert isinstance(filters, list), \"Filters must be applied as a list of Dictionaries\"\n\n self._refresh = refresh\n self._after_id = after_id\n\n filters = filters + self._get_refresh_filter()\n filters = filters + self._get_workflow_filter()\n\n self._filters = filters\n\n if self.documents:\n self._size = len(documents)\n else:\n self._size = dataset.len(filters=filters) if self._limit_documents is None else self._limit_documents\n\n self._successful_documents = 0\n self._success_ratio = None\n\n self._job_id = None\n self._workflow_name = None\n\n @property\n def operator(self) -> AbstractOperator:\n return self._operator\n\n @property\n def operators(self) -> Sequence[AbstractOperator]:\n return self._operators\n\n @property\n def dataset(self) -> Dataset:\n return self._dataset\n\n @property\n def pull_chunksize(self) -> int:\n return self._pull_chunksize\n\n @property\n def size(self) -> int:\n return self._size\n\n @property\n def limit_documents(self) -> int:\n return self._limit_documents\n\n @property\n def documents(self) -> List[Document]:\n return self._documents\n\n @property\n def output_to_status(self) -> bool:\n return self._output_to_status\n\n @property\n def output_documents(self) -> bool:\n return self._output_documents\n\n 
@property\n def success_ratio(self) -> float:\n if self._success_ratio is None:\n success_ratio = 1\n else:\n success_ratio = self._success_ratio\n return success_ratio\n\n @property\n def job_id(self):\n return self._job_id\n\n @property\n def name(self):\n return self._workflow_name\n\n def extend_output_documents(self, documents: List[Document]):\n self._output_documents.extend(documents)\n\n @abstractmethod\n def apply(self) -> None:\n raise NotImplementedError\n\n def __call__(self) -> Any:\n if self.size != 0:\n self.apply()\n self.set_success_ratio()\n\n def _operate(self, mini_batch):\n try:\n # note: do not put an IF inside ths try-except-else loop - the if code will not work\n transformed_batch = self.operator(mini_batch)\n except Exception as e:\n ic(e)\n ic({\"chunk_ids\": self._get_chunks_ids(mini_batch)})\n else:\n # if there is no exception then this block will be executed\n # we only update schema on the first chunk\n # otherwise it breaks down how the backend handles\n # schema updates\n self._successful_documents += len(mini_batch)\n return transformed_batch\n\n def _get_refresh_filter(self):\n # initialize the refresh filter container\n input_field_filters = {\"filter_type\": \"or\", \"condition_value\": []}\n\n # initialize where the filters are going\n output_field_filters = {\"filter_type\": \"or\", \"condition_value\": []}\n\n # We want documents where any of the select_fields exists\n # as these are needed for operator ...\n # We construct this as:\n #\n # (input_field1 or input_field2) and (not output_field1 or not output_field2)\n #\n # This use case here is for two input fields and two output fields\n # tho this extends to arbitrarily many.\n for field in self._select_fields:\n input_field_filters[\"condition_value\"] += self.dataset[field].exists()\n\n # ... 
and where any of its output_fields dont exist\n if not self._refresh:\n for operator in self.operators:\n if operator.output_fields is not None:\n for output_field in operator.output_fields:\n output_field_filters[\"condition_value\"] += self.dataset[output_field].not_exists()\n\n return [input_field_filters, output_field_filters]\n\n else:\n # Wrap in list at end\n return [input_field_filters]\n\n def _get_workflow_filter(self, field: str = \"_id\"):\n # Get the required workflow filter as an environment variable\n # WORKER_NUMBER is passed into execute function\n # total number of workers must be greater than 1 for data sharding to work\n if self.worker_number is not None and self.total_workers is not None:\n if self.total_workers > 1:\n return [{\"matchModulo\": {\"field\": field, \"modulo\": self.total_workers, \"value\": self.worker_number}}]\n return []\n\n def get_iterator(self) -> Iterator:\n if self.documents is None or len(self.documents) == 0:\n # Iterate through dataset\n iterator = self.iterate()\n else:\n # Iterate through passed in documents\n iterator = self.chunk_documents(chunksize=min(100, len(self.documents)), documents=self.documents)\n return iterator\n\n def iterate(\n self,\n filters: Optional[List[Filter]] = None,\n select_fields: Optional[List[str]] = None,\n max_retries: int = 5,\n sort: Optional[list] = None,\n include_vector: bool = True,\n random_state: int = 0,\n is_random: bool = False,\n ):\n if filters is None:\n filters = self._filters\n\n filters += self._get_workflow_filter()\n\n if select_fields is None:\n select_fields = self._select_fields\n\n retry_count = 0\n documents_processed = 0\n\n while True:\n try:\n # Adjust chunksize to get correct amount of documents\n if self.limit_documents is None or documents_processed + self.pull_chunksize < self.limit_documents:\n pull_chunksize = self._pull_chunksize\n else:\n pull_chunksize = self.limit_documents - documents_processed\n\n chunk = self._dataset.get_documents(\n page_size=pull_chunksize,\n filters=filters,\n select_fields=select_fields,\n after_id=self._after_id,\n worker_number=self.worker_number,\n sort=sort,\n include_vector=include_vector,\n random_state=random_state,\n is_random=is_random,\n )\n except (ConnectionError, JSONDecodeError) as e:\n ic(e)\n retry_count += 1\n time.sleep(1)\n\n if retry_count >= max_retries:\n raise MaxRetriesError(\"max number of retries exceeded\")\n else:\n self._after_id = chunk[\"after_id\"]\n if not chunk[\"documents\"]:\n break\n\n documents = chunk[\"documents\"]\n documents = self._filter_for_non_empty_list(documents)\n if documents:\n yield documents\n\n retry_count = 0\n # If document limit is hit, break the loop\n documents_processed += chunk[\"count\"]\n if self.limit_documents is not None and documents_processed >= self.limit_documents:\n break\n\n @staticmethod\n def chunk_documents(chunksize: int, documents: List[Document]):\n num_chunks = len(documents) // chunksize + 1\n for i in range(num_chunks):\n start = i * chunksize\n end = (i + 1) * chunksize\n chunk = documents[start:end]\n if len(chunk) > 0:\n yield chunk\n\n def update_chunk(self, chunk: List[Document], ingest_in_background: bool = True, update_schema: bool = False):\n if chunk:\n return self._dataset.update_documents(\n documents=chunk, ingest_in_background=ingest_in_background, update_schema=update_schema\n )\n\n def api_progress(\n self,\n iterator: Iterator,\n show_progress_bar: bool = True,\n n_total: int = None,\n n_passes: int = 1,\n pass_index: int = 0,\n ) -> Iterator:\n assert 
n_passes >= 1, \"`n_passes` must be strictly positive and greater than 0\"\n assert pass_index >= 0, \"`pass_index` must be strictly positive\"\n\n if n_total is None:\n n_total = self.size\n\n total = n_total * n_passes\n inital_value = pass_index * n_total\n self.update_progress(n_processed=inital_value, n_total=total)\n\n desc = \" -> \".join([repr(operator) for operator in self.operators])\n\n tqdm_bar = tqdm(range(total), desc=desc, disable=(not show_progress_bar), total=total)\n tqdm_bar.update(inital_value)\n\n total_so_far = 0\n for batch in iterator:\n yield batch\n api_n_processed = total_so_far + len(batch) + pass_index * n_total\n self.update_progress(n_processed=api_n_processed, n_total=total)\n total_so_far += len(batch)\n tqdm_bar.update(len(batch))\n\n def update_progress(self, n_processed: int, n_total: int = None):\n \"\"\"\n n_process: int\n the number of documents that have been processed:\n\n n_total: int\n the total number of documets to be processed\n \"\"\"\n if self.job_id:\n if n_total is None:\n n_total = self.size\n\n return self.dataset.api._update_workflow_progress(\n workflow_id=self.job_id,\n worker_number=self.worker_number,\n step=self.name,\n n_processed=n_processed,\n n_total=n_total,\n )\n\n def update_engine_props(self, job_id: str, workflow_name: str):\n self._job_id = job_id\n self._workflow_name = workflow_name\n self.dataset.api.headers.update(ai_transform_job_id=job_id, ai_transform_name=workflow_name)\n\n def set_success_ratio(self) -> None:\n if self.size:\n denominator = self.size * len(self.operators)\n self._success_ratio = self._successful_documents / denominator\n else:\n self._success_ratio = 1\n ic({\"success_ratio\": self._success_ratio})\n\n @staticmethod\n def _filter_for_non_empty_list(documents: List[Document]) -> List[Document]:\n # if there are more keys than just _id in each document\n # then return that as a list of Documents\n # length of a dictionary is just 1 if there is only 1 key\n return DocumentList([document for document in documents if len(document.keys()) > 1])\n\n @staticmethod\n def _get_chunks_ids(documents: List[Document]) -> List[Document]:\n return [document[\"_id\"] for document in documents]\n","repo_name":"RelevanceAI/ai-transform","sub_path":"ai_transform/engine/abstract_engine.py","file_name":"abstract_engine.py","file_ext":"py","file_size_in_byte":14965,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
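A rough sketch of the filter structure assembled by _get_refresh_filter and _get_workflow_filter in the abstract_engine.py record above. The field names are assumptions, and the inner exists/not-exists clauses are placeholders, since their exact shape comes from the Dataset field API rather than from this record; only the or-wrappers, the AND-ed top-level list, and the matchModulo shard filter are taken from the code itself.

select_fields = ["text", "title"]        # the workflow's input fields (assumed)
output_fields = ["text_vector_"]         # the operator's output fields (assumed)

input_field_filters = {"filter_type": "or", "condition_value": []}
for field in select_fields:
    # placeholder clause; the real one comes from dataset[field].exists()
    input_field_filters["condition_value"] += [{"field": field, "exists": True}]

output_field_filters = {"filter_type": "or", "condition_value": []}
for output_field in output_fields:
    # placeholder clause; the real one comes from dataset[output_field].not_exists()
    output_field_filters["condition_value"] += [{"field": output_field, "exists": False}]

# the top-level list is AND-ed: (text or title present) and (text_vector_ missing)
filters = [input_field_filters, output_field_filters]

# sharding across 4 workers adds a concrete modulo filter per worker (worker_number = 1 here)
filters += [{"matchModulo": {"field": "_id", "modulo": 4, "value": 1}}]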
+{"seq_id":"41857722177","text":"from pathlib import Path\n\nfrom IPython.core.magic import Magics, line_magic, magics_class\nfrom IPython.core import getipython\n\nimport pandas as pd\n\nfrom .dataframe_browser import MultipleDataframeBrowser\n\n\ndef _parse_and_file_load(browser, load_func, line: str):\n parts = line.split()\n if not parts:\n raise ValueError('Must provide a file path.')\n\n path = parts[0]\n if not Path(path).exists():\n raise ValueError(f'File does not exist: {path}')\n\n name = parts[1] if len(parts) > 1 else None\n df = load_func(path)\n browser.add_df(df, name=name)\n return name\n\n\n@magics_class\nclass IpythonCli(Magics):\n\n def __init__(self, shell):\n super(IpythonCli, self).__init__(shell)\n self.browser = MultipleDataframeBrowser(ipython_session=getipython.get_ipython())\n print('try %pq or %csv, or use %show my_df')\n\n @line_magic\n def get_browser(self, _):\n \"\"\"Embed the browser in your globals so you can access it directly.\"\"\"\n globals()['browser'] = self.browser\n return self.browser\n\n @line_magic\n def fg(self, name):\n self.browser.browse(name.strip() or None)\n\n @line_magic\n def show(self, name):\n name = name.strip()\n df = getipython.get_ipython().ev(name)\n self.browser[name] = df\n self.browser.browse(name)\n\n @line_magic\n def pq(self, line):\n \"\"\"Loads a parquet file into the browser\"\"\"\n self.browser.browse(_parse_and_file_load(self.browser, pd.read_parquet, line))\n\n @line_magic\n def csv(self, line):\n \"\"\"Loads a csv file into the browser\"\"\"\n self.browser.browse(_parse_and_file_load(self.browser, pd.read_csv, line))\n\n\ndef load_ipython_extension(ipython):\n \"\"\"Load the extension in IPython.\"\"\"\n ipython.register_magics(IpythonCli)\n","repo_name":"petergaultney/df-browse","sub_path":"dfbrowse/ipython_cli.py","file_name":"ipython_cli.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"702664252","text":"\"\"\"3.Dobjunk egy dobókockával egymás után 7-szer, az eredményeket tároljuk el!\n\tSzámoljuk ki a dobások átlagát\n\tSzámoljuk meg hány hatos dobás történt\n\tSzámoljuk meg hány dobás volt páratlan\"\"\"\n\n\nfrom typing import *\nimport random\n\n\nhalmaz: List[int] = []\nelemekSzama:int = random.randint(10,20)\nosszeg:int = None\nszam:int = None\nparatlan:int = None\nhat:int = None\nlegtobbetEloforduloszamok:List[int] = []\n\ndef listaFeltoltesekRandomSzamokkal(elem:int)-> List[int]:\n eredmeny: List[int] = []\n for i in range(elem + 1):\n eredmeny.append(random.randint(1,6))\n\n return eredmeny\n\ndef halmazKiirasa(kiirandoHalmaz:List[int])-> None:\n for item in kiirandoHalmaz:\n print(f\"{item}\", end=\"\\t\")\n\ndef szamokOsszege(kiirandoHalmaz:List[int])-> int:\n eredmeny:int = 0\n for item in kiirandoHalmaz:\n eredmeny+=item\n \n return eredmeny\n\ndef paratlanSzamokSzama(kiirandoHalmaz:List[int])-> int:\n eredmeny:int = 0\n for item in kiirandoHalmaz:\n if (item % 2 == 0):\n eredmeny+=1\n\n return eredmeny\n\ndef hatosokszama(kiirandoHalmaz:List[int])-> int:\n eredmeny:int = 0\n for item in kiirandoHalmaz:\n if (item == 6):\n eredmeny+=1\n\n return eredmeny \n\n\n\n\"\"\"\ndef legtobbszam(kiirandoHalmaz:List[int])-> Dict[int,int]:\n dict:Dict[int,int]\n for item in kiirandoHalmaz:\n if(dict.has_key(szam)):\n dict[szam]+1\n else:\n dict.add(szam,1)\n \n return dict\n\"\"\"\n\ndef legnagyobbKulcsErteke(szotar: Dict[int, int])-> List[int]:\n kulcs:int = None\n ertek:int = 0\n eredmeny:List[int]=[]\n\n #a legnagyobb érték kikeresése a szótárból\n for key, value in szotar.items(): #végiglépkedünk a szótár összes elemén kulcs-érték párokkal\n if (szotar[key] > ertek):\n kulcs = key\n ertek = szotar[key]\n\n #kikeressük azokat a kulcsokat, amelyek értéke egyenlő az érték változóval, \n # mivel azoknak a kulcsoknak a száma fordul elő a legtöbbször\n for key, value in szotar.items(): \n if(szotar[key] == ertek):\n eredmeny.append(key)\n\n\n return eredmeny\n\n\ndef legtobbetEloforduloSzam(lista:List[int])-> List[int]:\n szotar: Dict[int, int] = {} #Dict[kulcs-> szám, value -> szám előfordulási száma]\n eredmeny: List[int] = []\n #meghatározzuk az előfordulási számokat\n for szam in lista:\n if(szam in szotar):\n szotar[szam]+=1 #szotar[szam] -> a kulcshoz tartozó értéket adja vissza\n else:\n szotar[szam] = 1\n\n #lista = [2,4,1,1,6,3,1]\n #szotar = {1:3, 2:1, 3:1, 4:1, 6:1}\n eredmeny = legnagyobbKulcsErteke(szotar)\n return eredmeny\n\n#főprogram\nhalmaz = listaFeltoltesekRandomSzamokkal(elemekSzama)\nhalmazKiirasa(halmaz)\n\nosszeg = szamokOsszege(halmaz)\nprint(f\"\\nA számok átlaga: {osszeg / elemekSzama}\")\n\nparatlan = paratlanSzamokSzama(halmaz)\nprint(f\"\\nA páratlan számok száma: {paratlan}\")\n\nhat = hatosokszama(halmaz)\nprint(f\"\\nHatos dobások száma: {hat} \")\n\nlegtobbetEloforduloszamok = legtobbetEloforduloSzam(halmaz)\nprint(f\"A legtöbbet előforduló szám(ok): \")\nprint(legtobbetEloforduloszamok)","repo_name":"T0m134/Python","sub_path":"09-Halmazok/Feladat 3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17986255988","text":"import time \r\n\r\ngmt = eval(input(\"Enter the time zone offset to GMT (e.g. +5) : \"))\r\n\r\n# converting gmt to hours \r\n\r\n# getting hours from gmt \r\ngmthours = int(gmt)\r\ngmtminute = ((gmt*100)%100)/60\r\n\r\nagmt = gmthours+gmtminute\r\n\r\n# converting gmt hours to second\r\ngmtsecond = agmt*3600\r\ncurrentTime = time.time()\r\nactualSecond = currentTime + gmtsecond\r\n# get second \r\nsecond = int(actualSecond)\r\n#get minute \r\nminute = second//60\r\n#get remaining second \r\nrsecond = second%60 \r\n#get hours \r\nhours = minute//60\r\n#current hour \r\ncurrentHours = hours%24\r\n# get remaining minutes \r\nrminute = minute%60 \r\n\r\nprint(\"{}:{}:{} GMT\".format(currentHours,rminute,rsecond))","repo_name":"Yuyi-hao/python-book-solution-","sub_path":"chapter 2/q18.py","file_name":"q18.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5947655949","text":"welcome_message = 'Welcome to Spotygo Bot!\\n' \\\r\n 'This bot allows you to download your favourite Spotify playlist in just a couple clicks!\\n\\n' \\\r\n 'To start - send me a link to your Spotify Playlist!\\n\\n' \\\r\n 'Note: you can\\'t download Dynamic playlists like \"Top 50 songs for you\" because of security reasons. \\n\\n' \\\r\n 'If you have any questions type /help'\r\n\r\nhelp_message = 'Welcome to Spotygo Bot!\\n' \\\r\n 'This Help section will answer some of your common questions.\\n\\n' \\\r\n '1. What playlists can I download?\\n' \\\r\n 'Any Spotify playlist which is not hidden from public.\\n' \\\r\n 'Dynamic playlists like \"Top 50 songs for you\" are not available,\\n' \\\r\n 'because Spotify services require your authorization for getting these playlists data.\\n' \\\r\n '2. How to start?\\n' \\\r\n 'To initiate the process just send SpotyGo the link to your playlist.\\n' \\\r\n 'The link should look like this https://open.spotify.com/...\\n\\n' \\\r\n '3. What will happen next?\\n' \\\r\n 'You will receive ZIP archive with all of your songs in mp4 format. ' \\\r\n 'It may take some time to download,\\n' \\\r\n 'so wait a bit while our services work for you.\\n\\n' \\\r\n '4. Why some of my songs are missing?\\n' \\\r\n 'Due to policy of some songs, they could be age restrited or not available for download.\\n' \\\r\n 'Usually it is not more than 1-2% of total playlist size.\\n\\n' \\\r\n '5. Is it safe?\\n' \\\r\n 'Yes, it is totally safe. We don\\'t request any user data other than playlist link.'\r\n\r\nsuccessful_download_message = 'Congrats!\\n' \\\r\n 'Download was successful!\\n\\n' \\\r\n 'To download another playlist, send new link to SpotyGo.'\r\n\r\nerror_incorrect_link_message = 'Sorry but link you sent is not valid.\\n\\n' \\\r\n 'Please send link that looks like this:\\n\\n' \\\r\n 'https://open.spotify.com/...\\n\\n' \\\r\n 'For FAQ type /help'\r\n\r\nerror_no_link_message = 'Sorry but the message you sent is not a link.\\n\\n' \\\r\n 'Please send link that looks like this:\\n\\n' \\\r\n 'https://open.spotify.com/...\\n\\n' \\\r\n 'For FAQ type /help'\r\n\r\nerror_one_link_message = 'You can download only 1 playlist at a time.\\n\\n' \\\r\n 'For FAQ type /help'\r\n\r\nrestriction_message = 'You\\'ve reached your limit of downloads.\\n' \\\r\n 'We set limit to 2 playlists per week, as this helps to reduce SpotyGo load.\\n\\n' \\\r\n 'If you would like to increase your limit,\\n' \\\r\n 'please contact SpotyGo.'\r\n\r\ninitialize_message = 'Working on it...'\r\n\r\nfile_error_message = 'Sorry, there was an error while sending file.'\r\n\r\n\r\nmy_messages = {'welcome_message': welcome_message,\r\n 'help_message': help_message,\r\n 'successful_download_message': successful_download_message,\r\n 'error_one_link_message': error_one_link_message,\r\n 'error_no_link_message': error_no_link_message,\r\n 'error_incorrect_link_message': error_incorrect_link_message,\r\n 'restriction_message': restriction_message,\r\n 'initialize_message': initialize_message,\r\n 'file_error_messagee': file_error_message}\r\n","repo_name":"romatv/telegram-project","sub_path":"telegram_bot_messages.py","file_name":"telegram_bot_messages.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14194386258","text":"\nfrom itertools import combinations, product\nfrom functools import reduce\n\ndef kmer_mismatches(kmer, d):\n \"\"\"Returns all k-mers that are within d mismatches of the given k-mer.\"\"\"\n mismatches = [kmer] # Initialize mismatches with the k-mer itself (i.e. d=0).\n alt_bases = {'A':'CGT', 'C':'AGT', 'G':'ACT', 'T':'ACG'}\n for dist in range(1, d+1):\n for change_indices in combinations(range(0,len(kmer)), dist):\n for substitutions in product(*[alt_bases[kmer[i]] for i in change_indices]):\n new_mistmatch = list(kmer)\n for idx, sub in zip(change_indices, substitutions):\n new_mistmatch[idx] = sub\n mismatches.append(''.join(new_mistmatch))\n return mismatches\n\n\ndef MotifEnumeration(DNA_set,k,d):\n\tmotif_sets = [{kmer for i in range(0,len(dna)-k+1) for kmer in kmer_mismatches(dna[i:i+k], d)} for dna in DNA_set]\n\n\treturn sorted(list(reduce(lambda a,b: a & b, motif_sets)))\n\n\nwith open(\"input_1.txt\",\"r\") as file:\n\tdata = file.readlines()\n\tDNA_set = data[1].strip().split()\n\tk, d = data[0].strip().split()\n\nprint(k,d)\nprint(DNA_set)\n\nMotifs = MotifEnumeration(DNA_set,int(k),int(d))\n\nprint(*Motifs,sep=\" \")","repo_name":"neuwirtt/Bioinformatics_I","sub_path":"MotifEnumeration.py","file_name":"MotifEnumeration.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74502967206","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 4 22:47:44 2017\n\n@author: RizMac\n\nRandom Forest Classifier\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier as rcf\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import f1_score as fscore\nimport HelperFunctions as hf\n\n\n#load data and labels\nX = pd.read_csv('../DataSet/normalized_data.csv') #features\nY = pd.read_csv('../DataSet/valid_label.csv') #win rate \n\n#Binarize label\nthreshold_win_rate = 0.55\nY = hf.binarizeLabel(Y, threshold_win_rate)\n\n##############################################################################\n# Find best features\nT = 1000 # of trials\n\nbest_score = 0.0\nbest_weights = None \n\nfor i in range(T):\n #Do K-Fold validation \n k = 10\n kf = KFold(n_splits=k) \n \n \n \n for train_index, test_index in kf.split(X):\n #get training and test sets\n X_train = X.iloc[train_index]\n X_test = X.iloc[test_index]\n Y_train = Y.iloc[train_index]\n Y_test = Y.iloc[test_index]\n \n classifier = rcf()\n classifier.fit(X_train, Y_train.as_matrix().ravel())\n \n Y_predict = classifier.predict(X_test)\n score = fscore(Y_test, Y_predict)\n \n if score > best_score:\n best_score = score\n best_weights = classifier.feature_importances_\n\n\n###############################################################################\n# Print results\n\nprint('Random Forest')\nprint('F-Score: {}'.format(best_score))\nhf.printParameterRankings(best_weights, hf.columns)\nprint('\\n')\n \n \n\n","repo_name":"RizRam/DOTA-2-Machine-Learning-Project","sub_path":"Dota2ML/RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70795660969","text":"import unittest\nimport time\nimport logging\nimport os\n\nimport moviepy.editor as moviepy\n\nfrom video.audio import AudioStreaming\nfrom video.managing import VideoManaging\n\nFILE_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# logging config\nlogging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)\n\n\nclass ManageAudioTest(unittest.TestCase):\n\n logging.info(\"Preparing set up test for Manage Audio\")\n\n def setUp(self):\n\n RATE = 16000\n CHUNK = 1024*100\n\n audio_path = os.path.join(FILE_PATH, 'samples/showgirls.wav')\n audio = moviepy.AudioFileClip(audio_path)\n\n with AudioStreaming(audio_path, RATE,\n CHUNK, audio.duration) as stream:\n self.audio_generator = stream.generator()\n\n def test_streaming_audio(self):\n\n logging.info(\"Streaming audio test\")\n\n # test streaming\n for value in self.audio_generator:\n self.assertIsNotNone(value)\n return\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"danhidsan/movie-trailer-classifier","sub_path":"test/test_manage_audio.py","file_name":"test_manage_audio.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"39899055099","text":"'''\nShows the dependence of T1 on sample concentration by plotting the inverse of T1 versus concentration\n'''\nimport numpy as np\nimport sympy as sm\nimport matplotlib.pyplot as plt\n\nsm.init_printing(use_latex=True, use_unicode=True)\n\nT_ones = np.array([3846.361, 456.563, 450.840, 141.635, 70.924])\nConc = np.array([0.0, 1.25, 2.5, 5.0, 10.0])\nplt.figure()\nplt.plot(Conc, 1./T_ones,'bo',ls='--')\nplt.ylabel('1 / $T_1$ (ms$^{-1}$)')\nplt.xlabel('Concentration (mM)')\nplt.show()\n","repo_name":"psanker/advanced-lab","sub_path":"nmr/plot_T1_vs_concentration.py","file_name":"plot_T1_vs_concentration.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"42084230932","text":"import time\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n\n\nwarnings.filterwarnings('ignore')\n\nnames = [\n \"Nearest Neighbors\",\n \"Linear SVM\",\n \"RBF SVM\",\n \"Gaussian Process\",\n \"Decision Tree\",\n \"Random Forest\",\n \"AdaBoost\",\n \"Naive Bayes\",\n \"QDA\",\n]\n\nclassifiers = [\n KNeighborsClassifier(),\n SVC(kernel=\"linear\"),\n SVC(),\n GaussianProcessClassifier(1.0 * RBF(1.0)),\n DecisionTreeClassifier(),\n RandomForestClassifier(),\n AdaBoostClassifier(),\n GaussianNB(),\n QuadraticDiscriminantAnalysis(),\n]\n\n\ndef grid_function(data, target, name, classifier, param_grid, x_test, y_test):\n x = data.drop(target, axis=1).values\n y = np.ravel(data[target])\n\n classifier = Pipeline([('scale', StandardScaler()),\n ('clf', classifier)])\n grid = GridSearchCV(classifier, param_grid=param_grid, scoring='f1_macro', cv=5)\n grid.fit(x, y)\n train_score = grid.score(x, y)\n print(grid.best_estimator_, train_score)\n score = grid.score(x_test, y_test)\n print('score', score)\n\n with open('grid_results.txt', 'a') as f:\n f.write(f'Classifier {name}')\n f.write(f'\\nParameters: {grid.best_params_}')\n f.write(f'\\nBest score: {train_score}\\n')\n\n return grid.best_estimator_, train_score, score\n\n\nkn_params_grid = {'clf__algorithm': ['ball_tree', 'kd_tree', 'brute'],\n 'clf__n_neighbors': [3, 5, 10],\n 'clf__weights': ['uniform', 'distance'],\n 'clf__leaf_size': [20, 30, 40],\n 'clf__p': [1, 2]}\nlinear_svm_params_grid = {'clf__C': [0.01, 1, 1000],\n 'clf__gamma': ['scale', 'auto'],\n 'clf__tol': [0.0001, 0.001, 0.01],\n 'clf__class_weight': [None, 'balanced']}\nrbf_svm_params_grid = {'clf__C': [0.01, 1, 1000],\n 'clf__gamma': ['scale', 'auto'],\n 'clf__tol': [0.0001, 0.001, 0.01],\n 'clf__class_weight': [None, 'balanced']}\ngauss_process_params_grid = {'clf__n_restarts_optimizer': [0, 1, 2],\n 'clf__max_iter_predict': [50, 100, 200]}\ndec_tree_params_grid = {'clf__criterion': ['gini', 'entropy', 'log_loss'],\n 'clf__splitter': ['best', 'random'],\n 'clf__min_samples_split': [2, 3],\n 'clf__max_features': ['sqrt', 'log2', None],\n 'clf__class_weight': [None, 'balanced']}\nrand_forest_params_grid = {'clf__n_estimators': [50, 100, 200],\n 'clf__criterion': ['gini', 'entropy', 'log_loss'],\n 'clf__min_samples_split': [2, 3],\n 'clf__max_features': ['sqrt', 'log2', None],\n 'clf__class_weight': [None, 'balanced', 'balanced_subsample']}\nada_boost_params_grid = {'clf__n_estimators': [25, 50, 100],\n 'clf__learning_rate': [0.1, 1, 10]}\ngauss_nb_params_grid = {'clf__var_smoothing': [1e-10, 1e-9, 1e-8]}\nqda_params_grid = {}\n\nparams = [\n kn_params_grid,\n linear_svm_params_grid,\n rbf_svm_params_grid,\n gauss_process_params_grid,\n dec_tree_params_grid,\n rand_forest_params_grid,\n ada_boost_params_grid,\n gauss_nb_params_grid,\n qda_params_grid,\n]\n\n# target = 'Event'\n# train_data = pd.read_csv('../original_datasets/train_datasets/ahmad_train.csv')\n# 
test_data = pd.read_csv('../original_datasets/test_datasets/ahmad_test.csv')\n\n# target = 'HeartDisease'\n# train_data = pd.read_csv('../original_datasets/train_datasets/uci_factorized_train.csv')\n# test_data = pd.read_csv('../original_datasets/test_datasets/uci_factorized_test.csv')\n\ntarget = 'RiskLevel'\ntrain_data = pd.read_csv('../original_datasets/train_datasets/maternal_factorized_train.csv')\ntest_data = pd.read_csv('../original_datasets/test_datasets/maternal_factorized_test.csv')\n\nx_test = test_data.drop(target, axis=1)\ny_test = np.ravel(test_data[target])\n\nfor name, clf, param_grid in zip(names, classifiers, params):\n start_time = time.time()\n estimator, estimator_best_score, best_score = grid_function(data=train_data, target=target, name=name,\n classifier=clf,\n param_grid=param_grid, x_test=x_test, y_test=y_test)\n\n end_time = time.time()\n\n with open('classification.txt', 'a') as res:\n res.write(f'Classifier: {name}')\n res.write(f'\\nScore on train data: {estimator_best_score}')\n res.write(f'\\nScore on test data: {best_score}\\n')\n res.write(f'Execution time: {end_time - start_time}\\n\\n')\n","repo_name":"EnZaNin/SyntheticDataGenerator","sub_path":"classification/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17948423825","text":"import pygame\nimport character_classes\nfrom dialogue import Dialogue, DialogueManager\nimport mini_1, mini_2, mini_3, mini_4, mini_5, mini_6, mini_7, mini_8, mini_9, mini_10, mini_11, mini_12, mini_13, mini_14, mini_15\nimport global_constants\nimport sys\nimport time\n\n# Scenes\nscenes = {\n 1: mini_1.menu,\n 2: mini_2.menu,\n 3: mini_5.menu,\n 4: mini_4.menu,\n 5: mini_6.menu,\n 6: mini_3.menu,\n 7: mini_7.menu,\n 8: mini_8.menu,\n 9: mini_9.menu,\n # 10: mini_10.menu,\n # 11: mini_11.menu,\n # 12: mini_12.menu,\n # 13: mini_13.menu,\n # 14: mini_14.menu,\n # 15: mini_15.menu,\n} #etc\n\n# Global Constants\nplayedLevel = False\nmini_level = 0\n\n# Game Loop\ndef main():\n global playedLevel, mini_level\n\n # Initialize Pygame\n pygame.init()\n\n # Set up the screen\n screen = pygame.display.set_mode((global_constants.SCREEN_WIDTH, global_constants.SCREEN_HEIGHT))\n pygame.display.set_caption(\"The Missing Sock\")\n\n # Set up the player\n player = character_classes.Player(['./sprites/mainsock-1.png'])\n socks = [\n character_classes.NPC(['./sprites/sidesock1-1.png', './sprites/sidesock1-2.png'], 400, 20),\n character_classes.NPC(['./sprites/sidesock2-1.png', './sprites/sidesock2-2.png'], 300, 120),\n character_classes.NPC(['./sprites/sidesock3-1.png', './sprites/sidesock3-2.png'], 240, 180),\n character_classes.NPC(['./sprites/sidesock4-1.png', './sprites/sidesock4-2.png'], 300, 320),\n character_classes.NPC(['./sprites/sidesock5-1.png', './sprites/sidesock5-2.png'], 400, 420),\n character_classes.NPC(['./sprites/sidesock6-1.png', './sprites/sidesock6-2.png'], 464, 326),\n character_classes.NPC(['./sprites/sidesock7-1.png', './sprites/sidesock7-2.png'], 500, 320),\n character_classes.NPC(['./sprites/sidesock8-1.png', './sprites/sidesock8-2.png'], 550, 220),\n character_classes.NPC(['./sprites/sidesock9-1.png', './sprites/sidesock9-2.png'], 550, 520)\n ]\n\n # Create dialogue instance\n dialogue_manager = DialogueManager()\n current_dialogue = dialogue_manager.activateDialogue()\n current_dialogue.started = True\n current_dialogue.is_active = True\n\n start_dialogue = Dialogue([\"[Press Space To Talk]\"])\n current_dialogue.started = True\n\n def checkSocks():\n for index, sock in enumerate(socks):\n if sock.handle_input(player):\n if not current_dialogue.is_active:\n return (True, index)\n\n return (False, -1)\n\n foundSocks = (False, -1)\n\n pygame.mixer.init()\n pygame.mixer.music.load(\"./music/Eli-Game-2-Song.ogg\") \n pygame.mixer.music.play(-1,0.0)\n\n while True:\n # Handle events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == pygame.K_SPACE:\n if current_dialogue.is_active and not current_dialogue.done:\n current_dialogue.next_line()\n elif not current_dialogue.is_active and foundSocks[0]:\n current_dialogue = dialogue_manager.activateDialogue()\n current_dialogue.is_active = True\n playedLevel = False\n\n # check if dialogue is done\n if current_dialogue.done and playedLevel == False and dialogue_manager.dialogue_index % 2 == 0:\n if (mini_level > 9):\n pass\n elif (mini_level + 1 == 1 \n or mini_level + 1 == 7 \n or mini_level + 1 == 10 \n or mini_level + 1 == 11\n or mini_level + 1 == 12 \n or mini_level + 1 == 14 \n or mini_level + 1 == 15 \n or mini_level + 1 == 13):\n scenes[mini_level + 1](0)\n time.sleep(0.8)\n else:\n scenes[mini_level + 1]()\n \n 
socks.remove(socks[foundSocks[1]])\n mini_level += 1\n playedLevel = True\n current_dialogue = dialogue_manager.activateDialogue()\n current_dialogue.is_active = True\n\n if current_dialogue.is_active == False and foundSocks[0]:\n start_dialogue.is_active = True\n else:\n start_dialogue.is_active = False\n\n foundSocks = checkSocks()\n\n # Handle player movement\n player.handle_input()\n \n # Update the screen\n screen.fill((255, 255, 255))\n\n for sock in socks: # Rendering the socks\n sock_sprite = sock.get_current_sprite()\n screen.blit(sock_sprite, (sock.x, sock.y))\n\n current_sprite = player.get_current_sprite()\n screen.blit(current_sprite, (player.x, player.y))\n\n if current_dialogue.is_active: # Render the dialogue\n current_dialogue.render(screen) \n\n if start_dialogue.is_active:\n start_dialogue.render(screen) \n\n pygame.display.flip()\n\n # Limit the frame rate\n pygame.time.Clock().tick(60)\n\nmain()","repo_name":"Eli6th/AngelHacksBoston2023","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42885242928","text":"\n# coding: utf-8\n\n# In[17]:\n\n\nimport scipy.integrate as sci\nimport numpy as np\nimport math\nfrom matplotlib import pyplot\n\nN = 80\nT1 = []\nT2 = []\nT3 = []\nA = np.linspace(0.1,4,N)\n\nfor i in A:\n dT = lambda b:((2*2**0.5)/(i**4-b**4)**0.5)\n b = np.linspace(0,i,N,endpoint=False)\n dT1vec = np.vectorize(dT)\n dT1_b = dT1vec(b)\n T1.append(sci.trapz(dT1_b, b)) # calculate time period by Trapezoidal integration method\n T2.append(sci.quad(dT,0,i)[0]) # calculate time period by normal python integration\n T3.append(sci.simps(dT1_b, b)) # calculate time period by Simpson's rule\n \n \npyplot.plot(A,T1,'g*-', label =\"Time period by Trapezoidal rule\")\npyplot.plot(A,T2,'r--', label =\"Time period by normal python integration rule\")\npyplot.plot(A,T3,'b--', label =\"Time period by Simpson's rule\")\npyplot.xlabel('Amplitude')\npyplot.ylabel('Period')\npyplot.title(\"T vs. A by different integration methods\")\npyplot.legend()\n\n","repo_name":"Phys7321/hw02-integrations-aninditamaiti","sub_path":"python/Challenge_problem.py","file_name":"Challenge_problem.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"362712851","text":"# _*_ coding:utf-8 _*_\n# Created by lzl\n# TIME: 2021/2/25 13:52\n'''\n python函数篇,介绍python初级函数、高级函数、函数式编程等进阶知识\n'''\n\n# 函数篇\n'''\n 内置函数:abs\n help(函数名),需要注意传入的参数个数、类型\n 定义函数:def 函数名(参数):\n return 值\n 导入函数:from 文件名 import 函数名\n import math\n 数据类型检查:isinstance()\n 如:if not isinstance(x,(int,float)):\n raise TypeError('bad operated type')\n 位置参数,如x的平方,但不影响默认值,默认值必须是不可变对象\n def power(x,n=2):\n s = 1 \n while n > 0:\n n = n - 1\n s = s * x\n return s\n def calc(*numbers): 可变参数,可以传入任意个参数,包括0个参数,参数其实自动组装成一个tuple\n def calc(**numbers): 参数自动组装成一个dict\n def calc(**kw): 其中kw是需要在函数内部检查,命名关键字参数是需要一个特殊分隔符*\n 如: def person(name,age,**kw):\n if 'city' in kw:\n pass\n if 'job' in kw:\n pass\n print('name:',name,'age:',age,'other:',kw)\n 参数定义的顺序必须是:必选参数、默认参数、可变参数、命名关键字参数和关键字参数\n def f1(a, b, c=0, *args, **kw):\n print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)\n\n def f2(a, b, c=0, *, d, **kw):\n print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)\n >>> f1(1, 2, 3, 'a', 'b', x=99)\n a = 1 b = 2 c = 3 args = ('a', 'b') kw = {'x': 99}\n\n 递归函数:\n 调��函数本身,如阶乘,需要注意栈溢出,调用次数需要收到限制,尾递归优化,return语句不能包含表达式自身,只会占用一个栈帧\n\n fact(n) = n * fact(n-1)\n 原:def fact(n):\n if n==1:\n return 1\n return n * fact(n - 1)\n 尾递归优化:每次只返回递归函数本身,不含表达式\n def fact(n):\n return fact_iter(n, 1)\n\n def fact_iter(num, product):\n if num == 1:\n return product\n return fact_iter(num - 1, num * product)\n'''\n\n\n# def power(x, n=2):\n# s = 1\n# while n > 0:\n# n = n - 1\n# s = s * x\n# return s\n#\n#\n# print(power(5, 2))\n# print(power(5))\n# print(power(5, 3))\n\n'''\n # 高级特性\n 切片:取片段都可以采用切片,负责取指定索引范围的操作\n L[0:3] 取0,1,2索引数据\n L[:3] 同上\n L[::] 取所有\n L[-2:] 去后两个,后面开始索引获取,第一个数索引是-1\n range() 自带创建序列数方法\n L[1:10:2] 1-10的按照步长2获取\n 切片适合数据类型:list,tuple,字符串等\n \n 迭代:通过for循环遍历对象的过程就称为迭代\n for key in d: 迭代一个变量\n for i, value in enumerate(['A', 'B', 'C']) 迭代两个变量,其中enumerate函数把一个list变成索引-元素对\n isinstance('abc',Iterable) 判断对象是否是可迭代对象\n isinstance('abc',Iterator) 判断对象是否是迭代器\n \n 列表生成式:使用表达式+for的形式表示\n [x * x for x in range(1:10)]\n d = {'x': 'A', 'y': 'B', 'z': 'C'}\n [k + '=' + v for k,v in d.items()] 包含两个或者多个变量的列表生成式\n [x for x in range(1,11) if x % 2 = 0 ] 穿插条件 if\n [ x if x % 2 = 0 else -x for x in range(1,11)] 穿插条件 if... 
else\n \n 生成器:只需要将列表生成器[]编程()就可以,机制是一遍循环一遍计算,惰性计算,返回的是一个算法,或者说是数据流\n 通过next()获得generator返回值,超过范围报StopIteration异常\n 含有yield的都可以看做是生成器\n \n 迭代器:直接作用关于for循环的对象是可迭代对象 Iterable\n 可以被next()函数调用并不断返回下一个值的对象称为迭代器:Iterator\n 生成器都是Iterator,但是list,str,dict不是Iterator,可以通过调用iter()方法成为Iterator\n Iterator甚至可以表示一个无限大的数据流 \n \n \n # 函数式编程\n 高阶函数:函数本身可以作为变量,指向另一个函数的\n map/reduce、filter、sorted\n map/reduce:格式相差无几,只是一个是单体计算,一个是累积计算\n map: f(x) = x * x; 表示为: r = map(f,[1,2,3,4,5]) 最后形成是一个Iterator,需要list()返回整个序列\n list(r) 最后结果为一个list\n reduce(f, [x1, x2, x3, x4]) = f(f(f(x1, x2), x3), x4),就是累积计算\n \n filter :用于过滤序列,思路是先给定序列,然后定义筛选函数,最后定一个生成器,不断返回过滤结果\n 以获取1000 以内的素数为例:\n def _odd_iter():\n n = 1\n while True:\n n = n + 2 \n yield n\n \n def _not_divisible(n):\n return lambda x: x % n > 0 \n \n def primes():\n yield 2\n it = _odd_iter() # 初始序列\n while True :\n n = next(it) # 返回序列的第一个数\n yield n \n it = filter(_not_divisible(n),it)\n \n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n \n sorted:函数抽象的方式\n sorted([36, 5, -12, 9, -21])\n sorted([36, 5, -12, 9, -21], key=abs)\n sorted(['bob', 'about', 'Zoo', 'Credit'], key=str.lower)\n sorted(['bob', 'about', 'Zoo', 'Credit'], key=str.lower, reverse=True)\n \n 返回函数:可变参数求和为例(不需要立刻求和,只需要返回求和函数)\n def lazy_sum(*args):\n def sum():\n ax = 0\n for n in args:\n ax = ax + n\n return ax\n return sum\n f = lazy_sum(1, 3, 5, 7, 9) 返回的是函数,这种方式成为“闭包”\n f() 才返回的是求和结果\n \n 匿名函数:正则表达式就是匿名函数的经典表示法,不直接显示定义函数 lambda\n list(map(lambda x: x * x ,[1,2,3,4,5]))\n \n 装饰器:函数对象自带类似__name__属性,想在代码运行期间动态增加功能,不改变函数定义的方式\n 其本质上是返回函数的高阶函数\n def log(func):\n def wrapper(*args, **kw):\n print('call %s():' % func.__name__)\n return func(*args, **kw)\n return wrapper\n @log\n def now():\n print('2015-3-25')\n now()后就会在前面先打印log\n \n 偏函数:可以做到如同设定参数默认值,降低函数调用的难度作用\n # 二进制转换,functools.partial的功能就是这个,对于参数个数太多,就可以依据这个创建新函数,固定部分参数\n def int2(x ,base=2):\n return int(x,base)\n'''\n\n# 求1000以内素数\ndef _odd_iter():\n n = 1\n while True:\n n = n + 2\n yield n\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter() # 初始序列\n while True:\n n = next(it) # 返回序列的第一个数\n yield n\n it = filter(_not_divisible(n), it)\n\n\nfor n in primes():\n if n < 1000:\n print(n)\n else:\n break","repo_name":"yidaodashi/pythonSkills","sub_path":"pythonBaseTrainning/pythonLiaoxuefeng/hardKnowlodge/functionGroup.py","file_name":"functionGroup.py","file_ext":"py","file_size_in_byte":7470,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9888392857","text":"\"\"\"add in progress step table\n\nRevision ID: 5771160a95ad\nRevises: d3a4c9e87af3\nCreate Date: 2023-04-10 19:26:23.232433\n\n\"\"\"\nimport sqlalchemy as db\nfrom alembic import op\nfrom dagster._core.storage.migration.utils import has_index, has_table\nfrom dagster._core.storage.sql import get_current_timestamp\nfrom sqlalchemy.dialects import sqlite\n\n# revision identifiers, used by Alembic.\nrevision = \"5771160a95ad\"\ndown_revision = \"d3a4c9e87af3\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n if not has_table(\"concurrency_slots\"):\n op.create_table(\n \"concurrency_slots\",\n db.Column(\n \"id\",\n db.BigInteger().with_variant(sqlite.INTEGER(), \"sqlite\"),\n primary_key=True,\n autoincrement=True,\n ),\n db.Column(\"concurrency_key\", db.Text, nullable=False),\n db.Column(\"run_id\", db.Text),\n db.Column(\"step_key\", db.Text),\n db.Column(\"deleted\", db.Boolean, nullable=False, default=False),\n db.Column(\"create_timestamp\", db.DateTime, server_default=get_current_timestamp()),\n )\n\n if not has_table(\"pending_steps\"):\n op.create_table(\n \"pending_steps\",\n db.Column(\n \"id\",\n db.BigInteger().with_variant(sqlite.INTEGER(), \"sqlite\"),\n primary_key=True,\n autoincrement=True,\n ),\n db.Column(\"concurrency_key\", db.Text, nullable=False),\n db.Column(\"run_id\", db.Text),\n db.Column(\"step_key\", db.Text),\n db.Column(\"priority\", db.Integer),\n db.Column(\"assigned_timestamp\", db.DateTime),\n db.Column(\"create_timestamp\", db.DateTime, server_default=get_current_timestamp()),\n )\n op.create_index(\n \"idx_pending_steps\",\n \"pending_steps\",\n [\"concurrency_key\", \"run_id\", \"step_key\"],\n mysql_length={\"concurrency_key\": 255, \"run_id\": 255, \"step_key\": 32},\n unique=True,\n )\n\n\ndef downgrade():\n if has_table(\"concurrency_slots\"):\n op.drop_table(\"concurrency_slots\")\n\n if has_table(\"pending_steps\"):\n if has_index(\"pending_steps\", \"idx_pending_steps\"):\n op.drop_index(\"idx_pending_steps\", \"pending_steps\")\n op.drop_table(\"pending_steps\")\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster/_core/storage/alembic/versions/040_add_in_progress_step_table.py","file_name":"040_add_in_progress_step_table.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"}
+{"seq_id":"39154869352","text":"\"\"\"Prep 8 Synthesize\n\n=== CSC148 Winter 2020 ===\nDepartment of Mathematical and Computational Sciences,\nUniversity of Toronto Mississauga\n\n=== Module Description ===\nYour task in this prep is to implement each of the unimplemented Tree methods\nin this file.\nThe starter code has a recursive template that includes the \"size-one\" case;\nyou may or may not choose to use this in your final implementations.\n\"\"\"\nfrom __future__ import annotations\nfrom typing import Any, List, Optional\n\n\nclass Tree:\n \"\"\"A recursive tree data structure.\n\n Note the relationship between this class and RecursiveList; the only major\n difference is that _rest has been replaced by _subtrees to handle multiple\n recursive sub-parts.\n \"\"\"\n # === Private Attributes ===\n # The item stored at this tree's root, or None if the tree is empty.\n _root: Optional[Any]\n # The list of all subtrees of this tree.\n _subtrees: List[Tree]\n\n # === Representation Invariants ===\n # - If self._root is None then self._subtrees is an empty list.\n # This setting of attributes represents an empty Tree.\n #\n # Note: self._subtrees may be empty when self._root is not None.\n # This setting of attributes represents a tree consisting of just one\n # node.\n\n def __init__(self, root: Any, subtrees: List[Tree]) -> None:\n \"\"\"Initialize a new Tree with the given root value and subtrees.\n\n If is None, the tree is empty.\n Precondition: if is None, then is empty.\n \"\"\"\n self._root = root # Sets the root of the Tree to be root\n self._subtrees = subtrees # Sets the subtree of the Tree to be the list\n # of trees\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of this tree.\n \"\"\"\n return self._str_indented(0)\n\n def is_empty(self) -> bool:\n \"\"\"Return True if this tree is empty.\n\n >>> t1 = Tree(None, [])\n >>> t1.is_empty()\n True\n >>> t2 = Tree(3, [])\n >>> t2.is_empty()\n False\n \"\"\"\n return self._root is None # no root => return empty\n\n def __len__(self) -> int:\n \"\"\"Return the number of items contained in this tree.\n\n >>> t1 = Tree(None, [])\n >>> len(t1)\n 0\n >>> t2 = Tree(3, [Tree(4, []), Tree(1, [])])\n >>> len(t2)\n 3\n \"\"\"\n # BASE CASE - TREE IS EMPTY\n if self.is_empty(): # self is a leaf => return 0 (leaf has no subtrees)\n return 0 # do not increase the count\n size = 1 # count the root\n for subtree in self._subtrees: # iterate through the subtrees of self\n size += subtree.__len__() # could also do len(subtree) here\n # increase size by the length of the subtree\n return size\n\n def num_positives(self) -> int:\n \"\"\"Return the number of positive integers in this tree.\n\n Precondition: all items in this tree are integers.\n\n Remember, 0 is *not* positive.\n\n >>> t1 = Tree(17, [])\n >>> t1.num_positives()\n 1\n >>> t2 = Tree(-10, [])\n >>> t2.num_positives()\n 0\n >>> t3 = Tree(1, [Tree(-2, []), Tree(10, []), Tree(-30, [])])\n >>> t3.num_positives()\n 2\n \"\"\"\n if self.is_empty():\n return 0\n num_pos = 0\n if self._root > 0:\n num_pos += 1\n for subtree in self._subtrees:\n num_pos += subtree.num_positives()\n return num_pos\n\n def maximum(self: Tree) -> int:\n \"\"\"Return the maximum item stored in this tree.\n\n Return 0 if this tree is empty.\n\n Precondition: all values in this tree are positive integers.\n\n >>> t1 = Tree(17, [])\n >>> t1.maximum()\n 17\n >>> t3 = Tree(1, [Tree(-2, []), Tree(10, []), Tree(-30, [])])\n >>> t3.maximum()\n 10\n \"\"\"\n # BASE CASE - IF TREE IS EMPTY\n if 
self.is_empty():\n return 0\n largest = self._root\n for subtree in self._subtrees:\n contender = subtree.maximum()\n if contender > largest:\n largest = contender\n return largest\n\n def height(self: Tree) -> int:\n \"\"\"Return the height of this tree.\n\n Please refer to the prep readings for the definition of tree height.\n\n >>> t1 = Tree(17, [])\n >>> t1.height()\n 1\n >>> t2 = Tree(1, [Tree(-2, []), Tree(10, []), Tree(-30, [])])\n >>> t2.height()\n 2\n \"\"\"\n if self.is_empty():\n return 0\n if not self._subtrees: # if self is a leaf and leaf.subtrees == []\n return 1\n depths = []\n tallest = 0\n for subtree in self._subtrees:\n depths.append(subtree.height() + 1)\n if depths:\n tallest = max(depths)\n return tallest\n\n def __contains__(self, item: Any) -> bool:\n \"\"\"Return whether this tree contains - .\n\n >>> t = Tree(1, [Tree(-2, []), Tree(10, []), Tree(-30, [])])\n >>> t.__contains__(-30) # Could also write -30 in t.\n True\n >>> t.__contains__(148)\n False\n \"\"\"\n if self.is_empty(): # If root is None return False\n return False\n occurs = False # Occurs flag set to false\n if self._root == item: # If the root of the current node is equal to the item\n return True # Item is in the tree => return True\n\n for subtree in self._subtrees:\n # if self._root == item (it is contained)\n if subtree.__contains__(item):\n # Change the occurs flag to True\n occurs = True\n break\n return occurs\n\n def _str_indented(self, depth: int) -> str:\n \"\"\"Return an indented string representation of this tree.\n\n The indentation level is specified by the parameter.\n \"\"\"\n if self.is_empty():\n return ''\n else:\n s = ' ' * depth + str(self._root) + '\\n'\n for subtree in self._subtrees:\n # Note that the 'depth' argument to the recursive call is\n # modified.\n s += subtree._str_indented(depth + 1)\n return s\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n\n import python_ta\n python_ta.check_all()\n","repo_name":"snak3codes/exam-practice","sub_path":"preps/prep8/prep8.py","file_name":"prep8.py","file_ext":"py","file_size_in_byte":6378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9363009003","text":"# 将一一分为二,就画出了半径为二分之一的两个圆弧,此为一生二,这两道圆将大圆分成了阴阳两部分,事物分成正反两方面\n# 到了最关键的画阴阳眼了,将二分之一,一分为二,连分三次(也就是0.5*0.5*0.5=1/8),就画出了半径为八分之一的阴阳眼,阴阳眼的面积为π*1/64,刚好是大圆面积的64分之一,\n# 暗合八八六十四卦,也就是二生三三生万物,也就是用八八六十四卦来类比天地万物。\n\n# https://blog.csdn.net/weixin_42398141/article/details/112232589\n# 先天太极八卦图的太极是顺时针旋转;阴阳鱼位置是:阳鱼在上,阴鱼在下,阴阳鱼眼在同一条水平线上\n# 黑色表示属阴, 黑中白点表示阴中有阳.白色表示属阳, 白中黑点表示阳中有阴.\n\nfrom turtle import *\nradius = 300\nup()\nsetpos(pos()+(-radius, 0))\ndown()\n\n# 绘制阴鱼\nbegin_fill()\nseth(270)\ncircle(radius, 180)\nseth(270)\ncircle(-radius/2.0, 180)\ncircle(radius/2.0, 180)\nend_fill()\n\n# 绘制阳鱼\nseth(90)\ncircle(-radius, 180)\n\n# 绘制阴眼\nup()\nsetpos(pos()+(-3/8.0*radius, 0))\ndown()\nbegin_fill()\ncircle(-1/8.0*radius)\nend_fill()\n\nup()\nhome()\nsetpos(pos()+(-3/8.0*radius, 0))\ndown()\n\n# 绘制阳眼\nfillcolor(\"white\")\nbegin_fill()\nseth(270)\ncircle(-1/8.0*radius)\nend_fill()\n\ndone()\n","repo_name":"chucklu/PythonTest","sub_path":"2022/chapter2/practice/2.12-taiji.py","file_name":"2.12-taiji.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28449922791","text":"def solution(n, plans):\n dx = [-1,0,1,0]\n dy = [0,1,0,-1]\n move = ['U', 'R', 'D', 'L']\n x,y = 1,1\n\n for plan in plans:\n idx = move.index(plan)\n nx = x + dx[idx]\n ny = y + dy[idx]\n if nx < 1 or ny < 1 or nx > n or ny > n:\n continue\n\n x,y = nx,ny\n\n return (x,y)\n\nprint(solution(5, ['R','R','R','U','D','D']))\n","repo_name":"imsoncod/Python-Algorithm","sub_path":"이코테2021/시뮬레이션/상하좌우.py","file_name":"상하좌우.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"12809469348","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport pandas as pd\nimport numpy as np\nimport pymysql\nfrom sqlalchemy import create_engine\nfrom datetime import datetime\n\nengine = create_engine(\"mysql+pymysql://yusupov_av:password@10.167.128.42/yusupov_av\")\n\nreadFileExcel = r\"C:\\Users\\yav\\Desktop\\report - 2022-07-08T090812.271\\exportFnsRegisterAndActs.xlsx\"\n\nexcelFile = pd.ExcelFile(readFileExcel)\n_Xls1 = excelFile.parse(sheet_name = \"Данные\", header=1, dtype=str)\n_Xls2 = excelFile.parse(sheet_name = \"Данные_1\", header=1, dtype=str)\nXls = pd.concat([_Xls1, _Xls2], ignore_index=True)\nXls['C_CHARGE_RATE_SQUARE_METERS'] = Xls['C_CHARGE_RATE_SQUARE_METERS'].astype(float)\nXls['C_SHOPPING_ROOM_AREA'] = Xls['C_SHOPPING_ROOM_AREA'].astype(float)\nXls['C_QUARTER_FEE'] = Xls['C_QUARTER_FEE'].astype(int)\nXls['C_QUARTER_FEE_CALCULATING'] = Xls['C_QUARTER_FEE_CALCULATING'].astype(int)\n\nXls.to_sql('tc_1', if_exists='replace', con=engine)\n\n\n# In[5]:\n\n\nimport pandas as pd\nimport sqlite3\nfrom sqlite3 import OperationalError\nfrom datetime import datetime as dt\nfrom sqlalchemy import create_engine\n\nprint(\"====== [START EXPORT TC1] ======\")\n\nexportDir = 'Z:/ТС/Аналитика/Сотрудники/Медведев Р.А/EXPORT_DATA/'\n\n\nconn = create_engine(\"mysql+pymysql://yusupov_av:password@10.167.128.42/yusupov_av\")\n\nactualTC1TableName = f\"TC_1-{dt.now().strftime('%Y-%m-%d')}\"\ntc1Df = pd.read_sql(\"SELECT * FROM `tc_1`\", con=conn)\n\ntc1shortDf = pd.DataFrame()\n\ntc1shortDf['C_IGNORING_TYPE'] = tc1Df['C_IGNORING_TYPE']\ntc1shortDf['C_DOC_DATE'] = tc1Df['C_DOC_DATE']\ntc1shortDf['C_MARK_NOTICE'] = tc1Df['C_MARK_NOTICE']\ntc1shortDf['C_TAX_AUTORITY_CODE'] = tc1Df['C_TAX_AUTORITY_CODE']\ntc1shortDf['C_BUILDING_CADASTRAL_NUMBER'] = tc1Df['C_BUILDING_CADASTRAL_NUMBER']\ntc1shortDf['C_CHARGE_RATE_SQUARE_METERS'] = tc1Df['C_CHARGE_RATE_SQUARE_METERS']\ntc1shortDf['C_OBJECT_ID'] = tc1Df['C_OBJECT_ID']\ntc1shortDf['C_OBJECT_NAME'] = tc1Df['C_OBJECT_NAME']\n\ntc1shortDf['C_OBJECT_TYPE'] = tc1Df['C_OBJECT_TYPE']\ntc1shortDf['C_QUARTER_FEE'] = tc1Df['C_QUARTER_FEE']\ntc1shortDf['C_QUARTER_FEE_CALCULATING'] = tc1Df['C_QUARTER_FEE_CALCULATING']\ntc1shortDf['C_SHOPPING_ROOM_AREA'] = tc1Df['C_SHOPPING_ROOM_AREA']\ntc1shortDf['C_TRADE_KIND'] = tc1Df['C_TRADE_KIND']\ntc1shortDf['C_USE_OBJECT_EMERGENCE_DATE'] = tc1Df['C_USE_OBJECT_EMERGENCE_DATE']\ntc1shortDf['C_STOP_USING_DATE'] = tc1Df['C_STOP_USING_DATE']\ntc1shortDf['C_ACTIVITY_TERMINATION_DATE'] = tc1Df['C_ACTIVITY_TERMINATION_DATE']\n\ntc1shortDf['C_STOP_USING_REASON'] = tc1Df['C_STOP_USING_REASON']\ntc1shortDf['C_NOTIFICATION_ANNULMENT'] = tc1Df['C_NOTIFICATION_ANNULMENT']\ntc1shortDf['C_ANNULMENT_CAUSE'] = tc1Df['C_ANNULMENT_CAUSE']\ntc1shortDf['C_UNIQUE_TRADE_OBJECT_ID'] = tc1Df['C_UNIQUE_TRADE_OBJECT_ID']\ntc1shortDf['C_PAYER_FEES_FAMILY'] = tc1Df['C_PAYER_FEES_FAMILY']\ntc1shortDf['ACT_FIELDS'] = tc1Df['ACT_FIELDS']\ntc1shortDf['ACT_NUMBER'] = tc1Df['ACT_NUMBER']\n\ntc1shortDf['ACT_CANCELED'] = tc1Df['ACT_CANCELED']\ntc1shortDf['C_COMPANY_NAME'] = tc1Df['C_COMPANY_NAME']\ntc1shortDf['C_INN'] = tc1Df['C_INN']\ntc1shortDf['ADDRESS_FIELDS'] = tc1Df['ADDRESS_FIELDS']\ntc1shortDf['C_BUILDING'] = tc1Df['C_BUILDING']\ntc1shortDf['C_CITY'] = tc1Df['C_CITY']\ntc1shortDf['C_HOUSE'] = tc1Df['C_HOUSE']\n\ntc1shortDf['C_LOCALITY'] = tc1Df['C_LOCALITY']\ntc1shortDf['C_REGION'] = tc1Df['C_REGION']\ntc1shortDf['C_ROOM'] = tc1Df['C_ROOM']\ntc1shortDf['C_STREET'] = 
tc1Df['C_STREET']\ntc1shortDf['C_ADMINISTRATIVE_DISTRICT'] = tc1Df['C_ADMINISTRATIVE_DISTRICT']\ntc1shortDf['C_OKTMO'] = tc1Df['C_OKTMO']\ntc1shortDf['C_REGISTRATION_DATE'] = tc1Df['C_REGISTRATION_DATE']\ntc1shortDf['C_LOAD_DATE'] = tc1Df['C_LOAD_DATE']\ntc1shortDf['C_SYNCHRONIZATION_DATE'] = tc1Df['C_SYNCHRONIZATION_DATE']\ntc1shortDf['C_PLACEMENT_NTO_NUMBER_PERMITS'] = tc1Df['C_PLACEMENT_NTO_NUMBER_PERMITS']\ntc1shortDf['(ИНФО) Дата Выгрузки'] = actualTC1TableName\n\ntc1shortDf.to_excel(exportDir + \"TC1_BACKUP/\" + actualTC1TableName + '.xlsx', sheet_name='ТС1')\n\"\"\"\nwith pd.ExcelWriter(exportDir + \"TC1_BACKUP/\" + actualTC1TableName + '.xlsx', engine='openpyxl', mode='a') as writer:\n infoPd = pd.DataFrame({\"Признак\": ['Дата Выгрузки'], \"Значение\": [actualTC1TableName]})\n infoPd.to_excel(writer, sheet_name='Информация')\n\n writer.save()\n\nwriter.close()\n\"\"\"\n\n\nprint(\"====== [END EXPORT TC1] ======\")\n\n\n# # Для уведомлений\n\n# In[ ]:\n\n\nimport pandas as pd\nimport sqlite3\nfrom sqlite3 import OperationalError\nfrom datetime import datetime as dt\nfrom sqlalchemy import create_engine\n\nwith open('Z:\\ТС\\Сбор информации\\Сотрудники\\Андрей Ю\\python\\lists\\login_pass_ip.txt', encoding='utf-8') as file:\n a = file.readlines()\n\nconn = create_engine(f\"mysql+pymysql://{a[0].strip()}:{a[1].strip()}@{a[2].strip()}/yusupov_av\")\n\ns = pd.read_sql(\"SELECT `INN`, `STREET`, `DATE_STOP_PATENT`, `DATE_START_PATENT`, `DATE_LOSS_PATENT`, `DATE_CESSATION_PATENT`, `DATE_STOP_USE_PATENT`, `HOUSE`, `KORP` FROM PSN WHERE DATE_START_PATENT > '2021-12-31'\", con=conn)\n\n\n# # Для патентов\n\n# In[3]:\n\n\nimport pandas as pd\nimport numpy as np\nimport pymysql\nfrom sqlalchemy import create_engine\nfrom datetime import datetime\nengine = create_engine(\"mysql+pymysql://yusupov_av:password@10.167.128.42/yusupov_av\")\n\nreadFileExcel = r\"Z:\\ТС\\Аналитика\\Текущая выгрузка\\ПСН\\Excel\\20220630_PSN.xlsx\"\n\nexcelFile = pd.ExcelFile(readFileExcel)\nXLs = excelFile.parse(sheet_name = \"Лист1\", header=0, dtype=str)\nXLs['DATE_START_PATENT'] = pd.to_datetime(XLs['ДатаНачПт']).dt.date\nXLs['INN'] = XLs['ИННЮЛ']\nXLs['STREET'] = XLs['Улица1']\nXLs['HOUSE'] = XLs['Дом1']\nXLs['KORP'] = XLs['Корпус1']\nXLs['district'] = XLs['Район1']\nXLs['city'] = XLs['Город1']\nXLs['locality'] = XLs['НаселПункт1']\n\nXLs['STREET1'] = XLs['Улица2']\nXLs['HOUSE1'] = XLs['Дом2']\nXLs['KORP1'] = XLs['Корпус2']\nXLs['district1'] = XLs['Район2']\nXLs['city1'] = XLs['Город2']\nXLs['locality1'] = XLs['НаселПункт2']\n\nXLs['DATE_STOP_PATENT'] = pd.to_datetime(XLs['ДатаКонПт']).dt.date\nXLs['DATE_LOSS_PATENT'] = pd.to_datetime(XLs['ДатаУтрПСН']).dt.date\nXLs['DATE_CESSATION_PATENT'] = pd.to_datetime(XLs['ДатаПрекрПСН']).dt.date\nXLs['DATE_STOP_USE_PATENT'] = pd.to_datetime(XLs['ДатаПрекрПримПСН']).dt.date\nXLs.to_sql('PSN', if_exists='replace', con=engine)\n\n\n# # Для ЕСХН\n\n# In[ ]:\n\n\nengine = create_engine(\"mysql+pymysql://yusupov_av:password@10.167.128.42/yusupov_av\")\n\nreadFileExcel = r\"Z:\\ТС\\Аналитика\\Текущая выгрузка\\ЕСХН\\20220324 ЕСХН.xlsx\"\n\nexcelFile = pd.ExcelFile(readFileExcel)\nXLs = excelFile.parse(sheet_name = \"Список\", header=0, dtype=str)\nXLs['ДатаНачЕСХН'] = pd.to_datetime(XLs['ДатаНачЕСХН']).dt.date\nXLs['ДатаКонЕСХН'] = pd.to_datetime(XLs['ДатаКонЕСХН']).dt.date\nXLs.to_sql('ESHN', if_exists='replace', con=engine)\n\n\n# In[ ]:\n\n\necxn = pd.read_sql(\"SELECT `ИНН`, `ДатаНачЕСХН`, `ДатаКонЕСХН` FROM ESHN\", con=conn)\n\n\n# In[ ]:\n\n\necxn\n\n\n# In[ 
]:\n\n\n\n\n","repo_name":"Scream183/convert-and-sending-to-database","sub_path":"Loader to DB TC, PSN, ESHN (1).py","file_name":"Loader to DB TC, PSN, ESHN (1).py","file_ext":"py","file_size_in_byte":7197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42191072249","text":"import logging\nfrom ..devicetypes import generic, misc, sensors, thermostats\n\nLOG = logging.getLogger(__name__)\n\ntry:\n UNSUPPORTED = generic.HGDevice\n SUPPORTED = {}\n SUPPORTED.update(sensors.DEVICETYPES)\n SUPPORTED.update(thermostats.DEVICETYPES)\n SUPPORTED.update(misc.DEVICETYPES)\nexcept Exception as err:\n LOG.critical(\"devicetypes Exception: %s\" % (err,))\n UNSUPPORTED = False\n SUPPORTED = {}\n","repo_name":"mikyjazz/max-component","sub_path":"custom_components/max/pymax/devicetypes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"40377713656","text":"import asyncio\n\nfrom apscheduler.triggers.cron import CronTrigger\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\n\n\nTIMEZONE = 'America/Sao_Paulo'\n\n_scheduler = None\n\n\nasync def enable(events):\n global _scheduler\n loop = asyncio.get_running_loop()\n _scheduler = AsyncIOScheduler(event_loop=loop)\n _scheduler.start()\n\n\ndef schedule(func, crontab):\n \"\"\"https://crontab.guru \\n minute hour day_month month day_week\"\"\"\n global _scheduler\n trigger = CronTrigger.from_crontab(crontab, timezone=TIMEZONE)\n return _scheduler.add_job(func, trigger=trigger)\n\n\ndef suspend(job):\n global _scheduler\n _scheduler.remove_job(job)\n","repo_name":"pitzer42/rembot","sub_path":"rembot/services/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"71851158887","text":"import hashlib\nimport hmac\nfrom urllib import urlencode\nimport requests\nimport json\nimport time\n\n\nclass CEX_IO(object):\n BASE_URL = \"https://cex.io/api/\"\n username = ''\n api_key = ''\n api_secret = ''\n nonce_v = ''\n\n def __init__(self, username, api_key, api_secret):\n self.last_price = 0\n self.currency_1 = \"ETH\"\n self.currency_2 = \"USD\"\n self.trading_fee = 0.2\n self.sell_price = 0\n self.username = username\n self.api_key = api_key\n self.api_secret = api_secret\n\n # get timestamp as nonce\n def __nonce(self):\n self.nonce_v = '{:.10f}'.format(time.time() * 1000).split('.')[0]\n return self.nonce_v\n\n # generate segnature\n def __signature(self):\n string = self.nonce_v + self.username + self.api_key # create string\n signature = hmac.new(self.api_secret, string,\n digestmod=hashlib.sha256).hexdigest().upper() # create signature\n return signature\n\n def place_sell_order(self, **params):\n url = self.BASE_URL + \"place_order/{}/{}\".format(self.currency_1, self.currency_2)\n params.update({\n 'key': self.api_key,\n 'signature': self.__signature(),\n 'nonce': self.__nonce(),\n 'type': \"sell\"})\n params = urlencode(params)\n ret = requests.post(url, params).text\n data = json.loads(ret)\n if data.get(\"error\", \"\") != \"\":\n print(\"Problem in placing order {}\".format(data['error']))\n else:\n print(\"Order Detail is {}\".format(data))\n\n def get_sell_price(self):\n ret = requests.get(self.BASE_URL + \"tickers/{}/{}\".format(self.currency_1, self.currency_2))\n data = json.loads(ret.text)\n for item in (data['data']):\n if item['pair'] == \"ETH:USD\":\n self.sell_price = item['bid']\n return self.sell_price\n\n def get_last_price(self):\n return self.last_price\n\n def update_last_price(self):\n ret = requests.get(self.BASE_URL + \"last_price/{}/{}\".format(self.currency_1, self.currency_2))\n data = json.loads(ret.text)\n self.last_price = float(data['lprice'])\n\nif __name__ == '__main__':\n ci = CEX_IO(\"user\", \"api\", \"secret\")\n ci.place_sell_order(amount=0.01, price=6100)","repo_name":"Abhisheksoni1/bot","sub_path":"cex_io.py","file_name":"cex_io.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1777841108","text":"import random\n\nSECURE = True\n\ndef isValid(text):\n text = text.lower()\n if 'wie' in text and (('geht' in text and 'dir' in text) or 'läuft' in text or 'geht\\'s' in text or 'gehts' in text):\n return True\n else:\n return False\n\ndef handle(text, tiane, profile):\n answers = ['Danke, gut!',\n 'Mir gehts gut, {}.'.format(tiane.user),\n 'Alles gut, {}.'.format(tiane.user)]\n tiane.say(random.choice(answers))\n tiane.say('Und wie geht es dir?')\n reply = tiane.listen()\n reply = reply.lower()\n if 'nicht so' in reply or 'schlecht' in reply or 'müde' in reply or 'mies' in reply or 'suboptimal' in reply:\n tiane.say('Das ist schade. Mach doch etwas, was du gerne tust, vielleicht geht es dir dann besser.')\n elif 'gut' in reply or 'besser' in reply or 'bestens' in reply or 'super' in reply or 'wundervoll' in reply or 'glücklich' in reply or 'froh' in reply:\n tiane.say('Das freut mich!')\n else:\n tiane.say('Ich fürchte, ich konnte dich nicht verstehen.')\n","repo_name":"FerdiKr/TIANE","sub_path":"server/modules/how_are_you.py","file_name":"how_are_you.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"de","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"}
+{"seq_id":"72906165608","text":"#!/usr/bin/python\n# Filename: rrc_analyzer.py\n\"\"\"\nA RRC analyzer that integrates LTE and WCDMA RRC\n\nAuthor: Yuanjie Li\n\"\"\"\nimport xml.etree.ElementTree as ET\nfrom .analyzer import *\nfrom .wcdma_rrc_analyzer import WcdmaRrcAnalyzer\nfrom .lte_rrc_analyzer import LteRrcAnalyzer\n\n__all__ = [\"RrcAnalyzer\"]\n\n\nclass RrcAnalyzer(Analyzer):\n \"\"\"\n A protocol ananlyzer for 3G/4G Radio Resource Control (RRC).\n It depends on WcdmaRrcAnalyzer and LteRrcAnalyzer, and integrates configurations\n from both ananlyzers.\n \"\"\"\n\n def __init__(self):\n Analyzer.__init__(self)\n\n # include analyzers\n self.include_analyzer(\"WcdmaRrcAnalyzer\", [self.__on_event])\n self.include_analyzer(\"LteRrcAnalyzer\", [self.__on_event])\n\n self.__cur_RAT = None # current RAT\n\n # init packet filters\n self.add_source_callback(self.__rrc_filter)\n\n def __rrc_filter(self, msg):\n \"\"\"\n Callback to process RRC messages.\n\n :param msg: the WCDMA/LTE RRC message from trace source.\n :type msg: Event\n \"\"\"\n\n if msg.type_id.find(\"LTE\") != -1: # LTE RRC msg received, so it's LTE\n self.__cur_RAT = \"LTE\"\n # WCDMA RRC msg received, so it's WCDMA\n elif msg.type_id.find(\"WCDMA\") != -1:\n self.__cur_RAT = \"WCDMA\"\n\n def __on_event(self, event):\n \"\"\"\n Triggered by WcdmaRrcAnalyzer and/or LteRrcAnalyzer.\n Push the event to analyzers that depend on RrcAnalyzer\n\n :param event: the event raised by WcdmaRrcAnalyzer and/or LteRrcAnalyzer.\n :type event: Event\n \"\"\"\n e = Event(event.timestamp, \"RrcAnalyzer\", event.data)\n self.send(e)\n\n def get_cell_list(self):\n \"\"\"\n Get a complete list of cell IDs.\n\n :returns: a list of cells the device has associated with\n \"\"\"\n # lte_cell_list=self.__lte_rrc_analyzer.get_cell_list()\n lte_cell_list = self.get_analyzer(\"LteRrcAnalyzer\").get_cell_list()\n # wcdma_cell_list=self.__wcdma_rrc_analyzer.get_cell_list()\n wcdma_cell_list = self.get_analyzer(\"WcdmaRrcAnalyzer\").get_cell_list()\n return lte_cell_list + wcdma_cell_list\n\n def get_cell_config(self, cell):\n \"\"\"\n Return a cell's active/idle-state configuration.\n\n :param cell: a cell identifier\n :type cell: a (cell_id,freq) pair\n :returns: this cell's active/idle-state configurations\n :rtype: LteRrcConfig or WcdmaRrcConfig\n \"\"\"\n # res=self.__lte_rrc_analyzer.get_cell_config(cell)\n res = self.get_analyzer(\"LteRrcAnalyzer\").get_cell_config(cell)\n if res:\n return res\n else:\n # return self.__wcdma_rrc_analyzer.get_cell_config(cell)\n return self.get_analyzer(\"WcdmaRrcAnalyzer\").get_cell_config(cell)\n\n def get_cur_cell(self):\n \"\"\"\n Get current cell's status\n\n :returns: current cell's status\n :rtype: LteRrcStatus or WcdmaRrcStatus\n \"\"\"\n if self.__cur_RAT == \"LTE\":\n # return self.__lte_rrc_analyzer.get_cur_cell()\n return self.get_analyzer(\"LteRrcAnalyzer\").get_cur_cell()\n elif self.__cur_RAT == \"WCDMA\":\n # return self.__wcdma_rrc_analyzer.get_cur_cell()\n return self.get_analyzer(\"WcdmaRrcAnalyzer\").get_cur_cell()\n else:\n return None\n\n def get_cur_cell_config(self):\n \"\"\"\n Get current cell's configuration\n\n :returns: current cell's status\n :rtype: LteRrcConfig or WcdmaRrcConfig\n \"\"\"\n if self.__cur_RAT == \"LTE\":\n # return self.__lte_rrc_analyzer.get_cur_cell_config()\n return self.get_analyzer(\"LteRrcAnalyzer\").get_cur_cell_config()\n elif self.__cur_RAT == \"WCDMA\":\n # return self.__wcdma_rrc_analyzer.get_cur_cell_config()\n return 
self.get_analyzer(\"WcdmaRrcAnalyzer\").get_cur_cell_config()\n else:\n return None\n\n def get_cell_on_freq(self, freq):\n \"\"\"\n Given a frequency band, get all cells under this freq in the cell_list.\n\n :param freq: a frequency band\n :type freq: int\n \"\"\"\n cell_list = self.get_cell_list()\n res = []\n for cell in cell_list:\n if cell[1] == freq:\n res.append(cell)\n return res\n\n def get_cell_neighbor(self, cell):\n \"\"\"\n Given a cell, return its neighbor cells\n\n :param cell: a cell identifier\n :type cell: a (cell_id,freq) pair\n \"\"\"\n cell_config = self.get_cell_config(cell)\n cell_freq = cell_config.status.freq\n inter_freq_dict = cell_config.sib.inter_freq_config\n neighbor_cells = []\n\n # add intra-freq neighbors\n neighbor_cells += self.get_cell_on_freq(cell[1])\n neighbor_cells.remove(cell) # remove the cell itself\n\n # add inter-freq/RAT neighbors\n for freq in inter_freq_dict:\n neighbor_cells += self.get_cell_on_freq(freq)\n\n # WCDMA: any cells under WCDMA can be its neighbors\n if cell_config.status.rat == \"UTRA\":\n cell_list = self.get_cell_list()\n for item in cell_list:\n if self.get_cell_config(item).status.rat == \"UTRA\" \\\n and item not in neighbor_cells:\n neighbor_cells.append(item)\n\n if cell in neighbor_cells:\n neighbor_cells.remove(cell) # remove the current cell itself\n\n return neighbor_cells\n","repo_name":"mobile-insight/mobileinsight-core","sub_path":"mobile_insight/analyzer/rrc_analyzer.py","file_name":"rrc_analyzer.py","file_ext":"py","file_size_in_byte":5524,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"53"}
+{"seq_id":"5509417226","text":"import json\n\nimport random\nfrom typing import List, Optional, Dict\nimport logging\nfrom pprint import pformat\nimport glob\nfrom collections import defaultdict\n\nfrom FLD_generator.formula import Formula\nfrom FLD_generator.argument import Argument\nfrom FLD_generator.proof_tree_generators import build as build_generator\nfrom FLD_generator.formula_distractors import build as build_distractor\nfrom FLD_generator.translation_distractors import build as build_translation_distractor\nfrom FLD_generator.proof_tree_generation_pipeline import ProofTreeGenerationPipeline\nfrom FLD_generator.datasets import NLProofSDataset\nfrom FLD_generator.word_banks import build_wordnet_wordbank\nfrom FLD_generator.translators import (\n build as build_translator,\n TemplatedTranslator,\n)\nfrom FLD_generator.interpretation import formula_is_identical_to\nfrom FLD_generator.utils import nested_merge, log_results\nfrom logger_setup import setup as setup_logger\n\nimport line_profiling\n\nlogger = logging.getLogger(__name__)\n\n\n@profile\ndef generate_dataset(dataset: NLProofSDataset,\n num_dataset: int = 10000) -> None:\n # agg_stats: Dict[str, int] = defaultdict(int)\n for i_sample, (nlproof_json, proof_tree, distractors, translation_distractors, stats) in enumerate(dataset.generate(num_dataset)):\n log_results(logger, i_sample=i_sample, nlproof_json=nlproof_json, proof_tree=proof_tree,\n distractors=distractors, translation_distractors=translation_distractors,\n stats=None)\n # for name, count in stats.items():\n # if count is not None:\n # agg_stats[name] += count\n # logger.info(pformat(dict(agg_stats)))\n\n\n@profile\ndef test_generate_dataset():\n\n word_bank = None\n # word_bank = build_wordnet_wordbank('eng')\n\n translator = None\n # translator = build_translator(\n # ['./configs/translations/thing.v1/'],\n # word_bank,\n # use_fixed_translation=False,\n # reused_object_nouns_max_factor=1.0,\n # limit_vocab_size_per_type=None,\n # # volume_to_weight='sqrt',\n # volume_to_weight='logE',\n # default_weight_factor_type='W_VOL__1.0',\n # adj_verb_noun_ratio='1-1-1',\n # )\n\n translation_distractor = None\n # translation_distractor = build_translation_distractor(word_bank=word_bank)\n\n generator = build_generator(\n [\n\n './configs/arguments/axioms/',\n\n # './configs/arguments/axioms/axiom.pred_only.json',\n # './configs/arguments/axioms/axiom.pred_arg.json',\n\n # './configs/arguments/axioms/axiom.and_or.pred_only.json',\n # './configs/arguments/axioms/axiom.and_or.pred_arg.json',\n\n # './configs/arguments/axioms/axiom.implication_intro.pred_only.json',\n # './configs/arguments/axioms/axiom.implication_intro.pred_arg.json',\n\n # './configs/arguments/axioms/axiom.negation.pred_only.json',\n # './configs/arguments/axioms/axiom.negation.pred_arg.json',\n\n # './configs/arguments/others/AACorpus.pred_arg.json',\n\n # # -- we exclude the below for speed --\n # './configs/arguments/theorems/theorem.pred_only.json',\n # './configs/arguments/theorems/theorem.pred_arg.json',\n\n # './configs/arguments/theorems/theorem.and_or.pred_only.json',\n # './configs/arguments/theorems/theorem.and_or.pred_arg.json',\n\n # './configs/arguments/theorems/theorem.G_MP.pred_arg.json',\n\n # -- not tested. 
may not work --\n # './configs/arguments/theorems/universal_theorem.axiom.pred_arg.json',\n # './configs/arguments/theorems/universal_theorem.theorem.pred_arg.json',\n\n # not that important universal theorems\n # './configs/arguments/theorems/universal_theorem.axiom.and_or.pred_arg.json',\n # './configs/arguments/theorems/universal_theorem.axiom.implication_intro.pred_arg.json',\n # './configs/arguments/theorems/universal_theorem.axiom.negation.pred_arg.json',\n # './configs/arguments/theorems/universal_theorem.theorem.and_or.pred_arg.json',\n ],\n elim_dneg=True,\n quantifier_axiom_arguments_weight=0.2,\n complex_formula_arguments_weight=0.5,\n quantifier_axioms=[\n 'universal_quantifier_elim',\n 'universal_quantifier_intro',\n 'existential_quantifier_intro',\n 'existential_quantifier_elim',\n ],\n )\n\n distractor = build_distractor(\n # 'various_form',\n # 'mixture(negative_tree.simplified_formula.various_form)',\n # 'mixture(negative_tree_double)',\n # 'mixture(negative_tree_triple)',\n # 'mixture(negative_tree_quadruple)',\n # 'mixture(negative_tree_double.simplified_formula.various_form)',\n # 'fallback(negative_tree.various_form)',\n # 'fallback(various_form.negative_tree)',\n\n # 'fallback(mixture(negative_tree_double).simplified_formula.various_form)',\n 'mixture(negative_tree_double.simplified_formula.various_form)',\n\n generator=generator,\n )\n\n pipeline = ProofTreeGenerationPipeline(\n generator,\n distractor=distractor,\n translation_distractor=translation_distractor,\n fallback_from_formula_to_translation_distractor=True,\n translator=translator,\n add_subj_obj_swapped_distractor=True,\n )\n\n depth_range = (1, 8)\n branch_extensions_range = (0, 5)\n distractors_range = (0, 20)\n\n unknown_ratio = 0.33\n\n use_collapsed_translation_nodes_for_unknown_tree = False\n translation_distractors_range = (0, 5) if translation_distractor is not None else (0, 0)\n\n dataset = NLProofSDataset(\n pipeline,\n\n depth_range,\n branch_extensions_range,\n unknown_ratio=unknown_ratio,\n\n distractors_range=distractors_range,\n\n use_collapsed_translation_nodes_for_unknown_tree=use_collapsed_translation_nodes_for_unknown_tree,\n word_bank=word_bank,\n translation_distractors_range=translation_distractors_range,\n\n raise_if_translation_not_found=True,\n )\n\n generate_dataset(dataset)\n\n\nif __name__ == '__main__':\n random.seed(0)\n setup_logger(level=logging.INFO)\n\n # test_generate_dataset_AACorpus()\n test_generate_dataset()\n","repo_name":"hitachi-nlp/FLD-generator","sub_path":"tests/FLD_generator/test_dataset.py","file_name":"test_dataset.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"1362176968","text":"TXTFILE = \"day12.txt\"\nINF = 999999\n\nmap = []\npath = {}\n\ndef get_item (x,y):\n if x < 0 or y < 0:\n return None\n try:\n return map[y][x]\n except:\n return None\n\ndef set_path_cost (x,y,v):\n path [f\"{x}:{y}\"] = v\n\ndef get_path_cost (x,y):\n return path.get(f\"{x}:{y}\", INF)\n\ndef get_cost_to (x1, y1, x2, y2):\n\n a = get_item (x1, y1)\n b = get_item (x2, y2)\n\n # if (a and b) and (abs (ord(a) - ord (b)) <= 1):\n if (a and b) and (ord(b) - ord(a) <= 1):\n return 1\n else:\n return INF\n\ndef calc_paths(x,y): \n\n def calc_path_to_me (x,y, x1, y1): #x1,y1 to me (x,y)\n\n my_cost = get_path_cost (x, y)\n his_cost = get_path_cost (x1, y1)\n step_cost = get_cost_to (x1, y1, x, y)\n\n if my_cost + step_cost < his_cost:\n set_path_cost (x1, y1, my_cost + step_cost)\n to_be_updated.append ([x1,y1])\n\n to_be_updated = []\n\n calc_path_to_me (x, y, x + 1, y)\n calc_path_to_me (x, y, x - 1, y)\n calc_path_to_me (x, y, x, y + 1)\n calc_path_to_me (x, y, x, y - 1)\n\n for i in to_be_updated:\n calc_paths (i[0],i[1])\n\nmr = 0\nwith open(TXTFILE, \"r\") as f:\n for line in f:\n ml = list(line.strip())\n for mc in range(len(ml)):\n if ml[mc] == \"S\":\n start = [mc,mr]\n ml[mc] = \"a\"\n elif ml[mc] == \"E\":\n end = [mc,mr]\n ml[mc] = \"z\"\n set_path_cost (mc, mr, 0)\n map.append (ml)\n mr += 1\n\ncalc_paths (end[0],end[1])\nprint (get_path_cost(start[0],start[1]))\n","repo_name":"cmohorea/adventcode2022","sub_path":"day12-1.py","file_name":"day12-1.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11318302749","text":"import random\n\ndef create(b,low,high,i):\n if i<20:\n b.append(random.randint(low,high))\n create(b,low,high,i+1)\n\ndef filter(b,i):\n if i==len(b):\n return b\n if b[i]%2!=1 or b[i]%3!=0:\n return filter(b, i + 1)\n if i < len(b)-1:\n b[i]=0\n return filter(b, i + 1)\n\n\ndef main():\n b=[]\n create(b,10,90,0)\n print(b)\n filter(b,0)\n print(b)\n\n\nif __name__=='__main__':\n main()","repo_name":"DoNjOnIn/6.1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37090487972","text":"with open('day1.in') as infile:\n data = infile.read().split('\\n')\n\nfinal = 0\nnext_line = 0\n\nchanges = [0]\nduplicates = []\n\nwhile len(duplicates) < 1:\n\n for frequency in data:\n\n symbol = data[next_line][0]\n number = int(data[next_line][1:])\n\n if symbol == '+':\n final += number\n elif symbol == '-':\n final -= number\n\n next_line += 1\n\n if final not in changes:\n changes.append(final)\n else:\n duplicates.append(final)\n \n next_line = 0\n\nprint('The duplicates are:',duplicates)","repo_name":"CowSai4/Python_CSP_Files","sub_path":"advent/2018/day1/day1_part2.py","file_name":"day1_part2.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"44350943185","text":"import json\nfrom requests import get\n\n\"\"\"\nThe idea of the resolution packet is to contain the\nIP address and the real address of the server,\nalong with a (potentially not yet set) ID that\nthe client has. The ID is so that the client can\nupdate the information on file for it\nas needed.\n\"\"\"\nclass RouterInfo(object):\n def __init__(self):\n # Read in the address from the .addr\n # file\n with open(\".addr\") as f:\n self.address = f.read()\n\n # Further, read in the IP address\n # from elsewhere on the internet\n\n self.ip = \\\n get('https://api.ipify.org?format=json').json()['ip']\n\n\n # Also, get the ID from the .id file if\n # it exists\n with open(\".id\") as f:\n self.id = f.read()\n\n def json(self):\n return json.dump(self.__dict__)\n","repo_name":"j-c-w/shareit","sub_path":"resolution/resolution_packet.py","file_name":"resolution_packet.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34941826324","text":"''' \r\nWrite a Python program that asks the user to enter\r\n a text and return user a dictionary whose keys are the words of the text entered and the values are\r\n the thrice of the reverse of the words that make up the text.\r\n '''\r\n\r\nw=input().split()\r\nd={}\r\nfor i in w:\r\n d[i]=(i*3)[::-1]\r\nprint(d)","repo_name":"SUDHARSHAN-12221185/python-pratice-questions","sub_path":"q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21081119750","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n hare,tort=head,head\n prev=None\n i=1\n j=1\n while(i!=n):\n hare=hare.next\n i+=1\n j+=1\n while(hare.next):\n prev=tort\n hare=hare.next\n tort=tort.next\n j+=1\n \n if tort==head:\n head=head.next\n tort.next=None\n if prev:\n prev.next=tort.next\n tort.next=None\n \n return head\n","repo_name":"melvin0008/leetcode","sub_path":"python/removeNthnode.py","file_name":"removeNthnode.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71484320807","text":"import os\nfrom collections import defaultdict\n\nTARGET_VALUE = 2020\n\ndef parse_input(file_name: str) -> []:\n with open(file_name) as in_file:\n return [int(line.strip()) for line in in_file.readlines()]\n\ndef build_input_dict(in_list: list) -> dict:\n in_dict = defaultdict(lambda: 0)\n for val in in_list:\n in_dict[val] += 1\n return in_dict\n\ndef calculate_answer(in_dict: dict, target: int) -> dict:\n half_of_target = int(target/2)\n if target%2 == 0 and half_of_target in in_dict and in_dict[half_of_target] > 1:\n return {\"a\": half_of_target, \"b\": half_of_target, \"mul\": half_of_target**2}\n for k in in_dict:\n if k == half_of_target:\n continue\n if target - k in in_dict:\n if in_dict[target - k] > 0:\n return {\"a\": k, \"b\": target - k , \"mul\": k*(target - k)}\n return None\n\nif __name__ == \"__main__\":\n dir_path = os.path.dirname(os.path.realpath(__file__))\n file_path = os.path.join(dir_path, 'input.txt')\n in_list = parse_input(file_path)\n in_dict = build_input_dict(in_list)\n answer = calculate_answer(in_dict, TARGET_VALUE)\n print(f'a: {answer[\"a\"]}, b: {answer[\"b\"]}, mul: {answer[\"mul\"]}')","repo_name":"nagybalint/advent-of-code-2020","sub_path":"day_1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73949785769","text":"import csv\nimport os\nimport cv2\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom math import ceil\nimport random\nimport sklearn\n\nDATA_LOCATION = '../udacity_data/'\n\n\ndef get_samples():\n \"\"\"\n Get training and validation data\n driving_log.csv contains the udacity provided data\n driving_log_shiv.csv' contains data I generated by running the simulator \n on track 1.\n\n Params:\n ----------\n n/a\n\n Returns\n ----------\n train_samples : list of strings\n filenames of training samples\n validation_samples : list of strings\n filenames of validation samples\n \"\"\"\n samples = []\n\n for datafile in ['driving_log.csv', 'driving_log_shiv.csv']:\n with open(os.path.join(DATA_LOCATION, datafile)) as csvfile:\n reader = csv.reader(csvfile)\n next(reader, None)\n for line in reader:\n samples.append(line)\n\n train_samples, validation_samples = train_test_split(samples, test_size=0.2)\n return train_samples, validation_samples\n\n \ndef generator(samples, batch_size=32):\n \"\"\"\n Coroutine to generate samples for keras' model fit function\n\n Note: Yields 6*batch_size samples at a time. For each \"sample\" in the batch \n images are loaded from the right, left, and center cameras. For each of \n these images their flipped counterparts are appended as well. Thus 6x the \n data is generated from a single image.\n\n Params:\n ----------\n samples: list of strings\n filenames of all images from which to draw batches\n batch_size: int\n desired batch size, default 32\n\n Returns (yields)\n ----------\n XX: np.array of shape (batch_size*6, 160, 320, 3)\n collection of images\n yy: np.array of shape (batch_size*6)\n labels corresponding to images held in XX\n \"\"\"\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n random.shuffle(samples)\n\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n measurements = []\n \n for batch_sample in batch_samples:\n for source_path in batch_sample[0:3]:\n source_path = batch_sample[0]\n filename = source_path.split('/')[-1]\n check_dirs = [os.path.join(DATA_LOCATION, 'IMG' ,filename), \n os.path.join(DATA_LOCATION, 'IMG_shiv' ,filename)]\n current_path = None\n for check_dir in check_dirs:\n if os.path.exists(check_dir):\n current_path = check_dir\n\n \n image = cv2.imread(current_path)\n measurement = float(batch_sample[3]) # steering angle measurement\n\n # Use the left and right cameras to generate additional data\n # Add a correction factor so the car steers away from the edges of the road \n if 'left' in filename:\n measurement += 0.2\n elif 'right' in filename:\n measurement -= 0.2\n\n images.append(image)\n measurements.append(measurement)\n\n # add a flipped version of the image for further data augmentation\n # flip the steering angle as well\n image_flipped = np.fliplr(image)\n images.append(image_flipped)\n measurements.append(-measurement)\n\n\n XX = np.array(images)\n yy = np.array(measurements)\n \n yield sklearn.utils.shuffle(XX, yy)\n\n\ndef get_model():\n \"\"\"\n Implement model similar to the one used by NVIDIA in their paper \n \"End to End Learning for Self-Driving Cars\".\n\n Params: \n ----------\n n/a\n\n Returns\n ----------\n train_samples : keras.Model()\n final model architecture\n \"\"\"\n\n model = Sequential()\n 
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160,320,3)))\n model.add(Cropping2D(cropping=((70,25),(0,0))))\n model.add(Conv2D(24,5,strides=(2,2),activation=\"relu\"))\n model.add(Conv2D(36,5,strides=(2,2),activation=\"relu\"))\n model.add(Conv2D(48,5,strides=(2,2),activation=\"relu\"))\n model.add(Conv2D(64,3,activation=\"relu\"))\n model.add(Conv2D(64,3,activation=\"relu\"))\n model.add(Flatten())\n model.add(Dense(100))\n model.add(Dense(50))\n model.add(Dense(10))\n model.add(Dense(1))\n model.compile(loss='mse', optimizer='adam')\n model.summary()\n return model\n\ndef train_and_save_model(model, train_samples, validation_samples):\n \"\"\"\n Train and save model. Save picture of loss curves.\n\n Params: \n ----------\n model: keras.Model()\n model to train!\n train_samples : list of strings\n filenames of training samples\n validation_samples : list of strings\n filenames of validation samples\n\n Returns\n ----------\n n/a\n \"\"\"\n batch_size = 32\n train_generator = generator(train_samples, batch_size=batch_size)\n validation_generator = generator(validation_samples, batch_size=batch_size)\n history_object = model.fit_generator(train_generator, steps_per_epoch=ceil(len(train_samples)/batch_size),\n validation_data=validation_generator,\n validation_steps=ceil(len(validation_samples)/batch_size),\n epochs=5, verbose=1)\n\n model.save('model_data_augmentation.h5')\n\n plt.plot(history_object.history['loss'])\n plt.plot(history_object.history['val_loss'])\n plt.title('model mean squared error loss')\n plt.ylabel('mean squared error loss')\n plt.xlabel('epoch')\n plt.legend(['training set', 'validation set'], loc='upper right')\n plt.savefig('loss.png')\n\n\nif __name__ == '__main__':\n train_samples, validation_samples = get_samples()\n model = get_model()\n random.seed(1234)\n train_and_save_model(model, train_samples, validation_samples)\n","repo_name":"kaulshiv/behavioral_cloning","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33774317798","text":"from huobi_future.impl import RestApiRequest\nfrom huobi_future.impl.utils.urlparamsbuilder import UrlParamsBuilder\nfrom huobi_future.impl.utils.apisignature import create_signature\nfrom huobi_future.impl.accountinfomap import account_info_map\nfrom huobi_future.impl.utils.inputchecker import *\nfrom huobi_future.impl.utils.timeservice import *\nfrom huobi_future.model import *\n\n\nclass RestApiRequestImpl(object):\n # __MARKET_URL = \"https://api.huobi.pro:443\"\n # __TRADING_URL = \"https://api.huobi.pro:443\"\n\n def __init__(self, api_key, secret_key, server_url=\"https://api.huobi.pro\"):\n self.__api_key = api_key\n self.__secret_key = secret_key\n self.__server_url = server_url\n\n def __create_request_by_get(self, url, builder):\n request = RestApiRequest()\n request.method = \"GET\"\n request.host = self.__server_url\n request.header.update({'Content-Type': 'application/json'})\n request.url = url + builder.build_url()\n return request\n\n def __create_request_by_post_with_signature(self, url, builder):\n request = RestApiRequest()\n request.method = \"POST\"\n request.host = self.__server_url\n create_signature(self.__api_key, self.__secret_key, request.method, request.host + url, builder)\n request.header.update({'Content-Type': 'application/json'})\n request.post_body = builder.post_map\n request.url = url + builder.build_url()\n return request\n\n def __create_request_by_get_with_signature(self, url, builder):\n request = RestApiRequest()\n request.method = \"GET\"\n request.host = self.__server_url\n create_signature(self.__api_key, self.__secret_key, request.method, request.host + url, builder)\n request.header.update({\"Content-Type\": \"application/x-www-form-urlencoded\"})\n request.url = url + builder.build_url()\n return request\n\n def get_exchange_timestamp(self):\n request = self.__create_request_by_get(\"/v1/common/timestamp\", UrlParamsBuilder())\n\n def parse(json_wrapper):\n return convert_cst_in_millisecond_to_utc(json_wrapper.get_int(\"data\"))\n\n request.json_parser = parse\n return request\n\n def get_candlestick(self, symbol, interval, size, start_time=None, end_time=None):\n check_symbol(symbol)\n check_range(size, 1, 2000, \"size\")\n\n builder = UrlParamsBuilder()\n builder.put_url(\"symbol\", symbol)\n builder.put_url(\"period\", interval)\n builder.put_url(\"size\", size)\n builder.put_url(\"start_time\", start_time)\n builder.put_url(\"end_time\", end_time)\n\n request = self.__create_request_by_get(\"/market/history/kline\", builder)\n\n def parse(json_wrapper):\n candlestick_list = list()\n data_list = json_wrapper.get_array(\"data\")\n for item in data_list.get_items():\n candlestick = Candlestick()\n candlestick.timestamp = convert_cst_in_second_to_utc(item.get_int(\"id\"))\n candlestick.low = item.get_float(\"low\")\n candlestick.high = item.get_float(\"high\")\n candlestick.amount = item.get_float(\"amount\")\n candlestick.open = item.get_float(\"open\")\n candlestick.close = item.get_float(\"close\")\n candlestick.volume = item.get_float(\"vol\")\n candlestick.count = item.get_int(\"count\")\n candlestick_list.append(candlestick)\n return candlestick_list\n\n request.json_parser = parse\n return request\n\n def get_price_depth(self, symbol, size=None):\n check_symbol(symbol)\n check_range(size, 1, 150, \"size\")\n builder = UrlParamsBuilder()\n builder.put_url(\"symbol\", symbol)\n builder.put_url(\"type\", \"step0\")\n request = self.__create_request_by_get(\"/market/depth\", builder)\n\n def 
parse(json_wrapper):\n tick = json_wrapper.get_object(\"tick\")\n dp = PriceDepth()\n dp.timestamp = convert_cst_in_millisecond_to_utc(tick.get_int(\"ts\"))\n bid_array = tick.get_array(\"bids\")\n ask_array = tick.get_array(\"asks\")\n bids = list()\n asks = list()\n for i in range(0, size):\n bid_entry = bid_array.get_array_at(i)\n entry = DepthEntry()\n entry.price = bid_entry.get_float_at(0)\n entry.amount = bid_entry.get_float_at(1)\n bids.append(entry)\n for i in range(0, size):\n ask_entry = ask_array.get_array_at(i)\n entry = DepthEntry()\n entry.price = ask_entry.get_float_at(0)\n entry.amount = ask_entry.get_float_at(1)\n asks.append(entry)\n dp.bids = bids\n dp.asks = asks\n return dp\n\n request.json_parser = parse\n return request\n\n def get_historical_trade(self, symbol, form_id, size):\n check_symbol(symbol)\n check_range(size, 1, 2000, \"size\")\n builder = UrlParamsBuilder()\n builder.put_url(\"symbol\", symbol)\n builder.put_url(\"size\", size)\n request = self.__create_request_by_get(\"/market/history/trade\", builder)\n\n def parse(json_wrapper):\n data_array = json_wrapper.get_array(\"data\")\n trade_list = list()\n for item in data_array.get_items():\n data_array_in = item.get_array(\"data\")\n for item_in in data_array_in.get_items():\n local_trade = Trade()\n local_trade.price = item_in.get_float(\"price\")\n local_trade.amount = item_in.get_float(\"amount\")\n local_trade.trade_id = item_in.get_int(\"id\")\n local_trade.timestamp = convert_cst_in_millisecond_to_utc(item_in.get_int(\"ts\"))\n local_trade.direction = item_in.get_string(\"direction\")\n trade_list.append(local_trade)\n return trade_list\n\n request.json_parser = parse\n return request\n\n def get_24h_trade_statistics(self, symbol):\n check_symbol(symbol)\n builder = UrlParamsBuilder()\n builder.put_url(\"symbol\", symbol)\n request = self.__create_request_by_get(\"/market/detail\", builder)\n\n def parse(json_wrapper):\n tick = json_wrapper.get_object(\"tick\")\n trade_statistics = TradeStatistics()\n trade_statistics.timestamp = convert_cst_in_millisecond_to_utc(json_wrapper.get_int(\"ts\"))\n trade_statistics.amount = tick.get_float(\"amount\")\n trade_statistics.open = tick.get_float(\"open\")\n trade_statistics.close = tick.get_float(\"close\")\n trade_statistics.high = tick.get_float(\"high\")\n trade_statistics.low = tick.get_float(\"low\")\n trade_statistics.count = tick.get_int(\"count\")\n trade_statistics.volume = tick.get_float(\"vol\")\n return trade_statistics\n\n request.json_parser = parse\n return request\n\n def get_symbols(self):\n request = self.__create_request_by_get(\"/v1/common/symbols\", UrlParamsBuilder())\n\n def parse(json_wrapper):\n symbols = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n local_symbol = Symbol()\n local_symbol.base_currency = item.get_string(\"base-currency\")\n local_symbol.quote_currency = item.get_string(\"quote-currency\")\n local_symbol.price_precision = item.get_int(\"price-precision\")\n local_symbol.amount_precision = item.get_int(\"amount-precision\")\n local_symbol.symbol_partition = item.get_string(\"symbol-partition\")\n local_symbol.symbol = item.get_string(\"symbol\")\n symbols.append(local_symbol)\n return symbols\n\n request.json_parser = parse\n return request\n\n def get_currencies(self):\n request = self.__create_request_by_get(\"/v1/common/currencys\", UrlParamsBuilder())\n\n def parse(json_wrapper):\n return json_wrapper.get_array(\"data\").get_items_as_string()\n\n request.json_parser = 
parse\n return request\n\n def get_best_quote(self, symbol):\n check_symbol(symbol)\n builder = UrlParamsBuilder()\n builder.put_url(\"symbol\", symbol)\n request = self.__create_request_by_get(\"/market/detail/merged\", builder)\n\n def parse(json_wrapper):\n best_quote = BestQuote()\n best_quote.timestamp = convert_cst_in_millisecond_to_utc(json_wrapper.get_int(\"ts\"))\n tick = json_wrapper.get_object(\"tick\")\n ask_array = tick.get_array(\"ask\")\n best_quote.ask_price = ask_array.get_float_at(0)\n best_quote.ask_amount = ask_array.get_float_at(1)\n bid_array = tick.get_array(\"bid\")\n best_quote.bid_price = bid_array.get_float_at(0)\n best_quote.bid_amount = bid_array.get_float_at(1)\n return best_quote\n\n request.json_parser = parse\n return request\n\n def get_accounts(self):\n request = self.__create_request_by_post_with_signature(\"/api/v1/contract_account_info\", UrlParamsBuilder())\n\n def parse(json_wrapper):\n data_array = json_wrapper.get_array(\"data\")\n account = Account()\n account.account_type = 'future'\n for item in data_array.get_items():\n balance = Balance()\n balance.currency = item.get_string('symbol')\n balance.balance = item.get_float('margin_balance')\n balance.position = item.get_float('margin_position')\n balance.frozen = item.get_float('margin_frozen')\n balance.available = item.get_float('margin_available')\n balance.profit_real = item.get_float('profit_real')\n balance.profit_unreal = item.get_float('profit_unreal')\n account.balances.append(balance)\n return account\n request.json_parser = parse\n return request\n\n def get_withdraw_history(self, currency, from_id, size):\n check_currency(currency)\n check_should_not_none(from_id, \"from_id\")\n check_should_not_none(size, \"size\")\n\n builder = UrlParamsBuilder()\n builder.put_url(\"currency\", currency)\n builder.put_url(\"type\", \"withdraw\")\n builder.put_url(\"from\", from_id)\n builder.put_url(\"size\", size)\n request = self.__create_request_by_get_with_signature(\"/v1/query/deposit-withdraw\", builder)\n\n def parse(json_wrapper):\n withdraws = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n withdraw = Withdraw()\n withdraw.id = item.get_int(\"id\")\n withdraw.currency = item.get_string(\"currency\")\n withdraw.tx_hash = item.get_string(\"tx-hash\")\n withdraw.amount = item.get_float(\"amount\")\n withdraw.address = item.get_string(\"address\")\n withdraw.address_tag = item.get_string(\"address-tag\")\n withdraw.fee = item.get_float(\"fee\")\n withdraw.withdraw_state = item.get_string(\"state\")\n withdraw.created_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"created-at\"))\n withdraw.updated_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"updated-at\"))\n withdraws.append(withdraw)\n return withdraws\n\n request.json_parser = parse\n return request\n\n def get_deposit_history(self, currency, from_id, size):\n check_symbol(currency)\n check_should_not_none(from_id, \"from_id\")\n check_should_not_none(size, \"size\")\n\n builder = UrlParamsBuilder()\n builder.put_url(\"currency\", currency)\n builder.put_url(\"type\", \"deposit\")\n builder.put_url(\"from\", from_id)\n builder.put_url(\"size\", size)\n request = self.__create_request_by_get_with_signature(\"/v1/query/deposit-withdraw\", builder)\n\n def parse(json_wrapper):\n deposits = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n deposit = Deposit()\n deposit.id = item.get_int(\"id\")\n deposit.currency = 
item.get_string(\"currency\")\n deposit.tx_hash = item.get_string(\"tx-hash\")\n deposit.amount = item.get_float(\"amount\")\n deposit.address = item.get_string(\"address\")\n deposit.address_tag = item.get_string(\"address-tag\")\n deposit.fee = item.get_float(\"fee\")\n deposit.withdraw_state = item.get_string(\"state\")\n deposit.created_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"created-at\"))\n deposit.updated_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"updated-at\"))\n deposits.append(deposit)\n return deposits\n\n request.json_parser = parse\n return request\n\n def get_balance(self, account):\n path = \"/v1/account/accounts/{}/balance\"\n path = path.format(account.id)\n request = self.__create_request_by_get_with_signature(path, UrlParamsBuilder())\n\n def parse(json_wrapper):\n balances = list()\n data = json_wrapper.get_object(\"data\")\n list_array = data.get_array(\"list\")\n for item in list_array.get_items():\n balance = Balance()\n balance.balance = item.get_float(\"balance\")\n balance.currency = item.get_string(\"currency\")\n balance.balance_type = item.get_string(\"type\")\n balances.append(balance)\n return balances\n\n request.json_parser = parse\n return request\n\n def transfer(self, symbol, from_account, to_account, currency, amount):\n check_symbol(symbol)\n check_should_not_none(from_account, \"from_account\")\n check_should_not_none(to_account, \"to_account\")\n check_should_not_none(currency, \"currency\")\n check_should_not_none(amount, \"amount\")\n if from_account == AccountType.SPOT and to_account == AccountType.MARGIN:\n address = \"/v1/dw/transfer-in/margin\"\n elif from_account == AccountType.MARGIN and AccountType.SPOT:\n address = \"/v1/dw/transfer-out/margin\"\n else:\n raise HuobiApiException(HuobiApiException.INPUT_ERROR, \"[Input] incorrect transfer type\")\n builder = UrlParamsBuilder()\n builder.put_post(\"currency\", currency)\n builder.put_post(\"symbol\", symbol)\n builder.put_post(\"amount\", amount)\n request = self.__create_request_by_post_with_signature(address, builder)\n\n def parse(json_wrapper):\n if json_wrapper.get_string(\"status\") == \"ok\":\n return json_wrapper.get_int(\"data\")\n\n request.json_parser = parse\n return request\n\n def apply_loan(self, symbol, currency, amount):\n check_symbol(symbol)\n check_should_not_none(currency, \"currency\")\n check_should_not_none(amount, \"amount\")\n builder = UrlParamsBuilder()\n builder.put_post(\"currency\", currency)\n builder.put_post(\"symbol\", symbol)\n builder.put_post(\"amount\", amount)\n request = self.__create_request_by_post_with_signature(\"/v1/margin/orders\", builder)\n\n def parse(json_wrapper):\n return json_wrapper.get_int(\"data\")\n\n request.json_parser = parse\n return request\n\n def repay_loan(self, load_id, amount):\n check_should_not_none(load_id, \"load_id\")\n check_should_not_none(amount, \"amount\")\n builder = UrlParamsBuilder()\n builder.put_post(\"amount\", amount)\n path = \"/v1/margin/orders/{}/repay\"\n path = path.format(load_id)\n request = self.__create_request_by_post_with_signature(path, builder)\n\n def parse(json_wrapper):\n return json_wrapper.get_int(\"data\")\n\n request.json_parser = parse\n return request\n\n def get_loan(self, symbol, start_date=None, end_date=None, states=None, from_id=None, size=None, direction=None):\n check_symbol(symbol)\n start_date = format_date(start_date, \"start_date\")\n end_date = format_date(end_date, \"end_date\")\n builder = UrlParamsBuilder()\n 
builder.put_url(\"symbol\", symbol)\n builder.put_url(\"start-date\", start_date)\n builder.put_url(\"end-date\", end_date)\n builder.put_url(\"states\", states)\n builder.put_url(\"from\", from_id)\n builder.put_url(\"size\", size)\n builder.put_url(\"direct\", direction)\n request = self.__create_request_by_get_with_signature(\"/v1/margin/loan-orders\", builder)\n\n def parse(json_wrapper):\n loan_list = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n loan = Loan()\n loan.loan_balance = item.get_float(\"loan-balance\")\n loan.interest_balance = item.get_float(\"interest-balance\")\n loan.interest_rate = item.get_float(\"interest-rate\")\n loan.loan_amount = item.get_float(\"loan-amount\")\n loan.interest_amount = item.get_float(\"interest-amount\")\n loan.symbol = item.get_string(\"symbol\")\n loan.currency = item.get_string(\"currency\")\n loan.id = item.get_int(\"id\")\n loan.state = item.get_string(\"state\")\n loan.account_type = account_info_map.get_account_by_id(self.__api_key,\n item.get_int(\"account-id\")).account_type\n loan.user_id = item.get_int(\"user-id\")\n loan.accrued_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"accrued-at\"))\n loan.created_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"created-at\"))\n loan_list.append(loan)\n return loan_list\n\n request.json_parser = parse\n return request\n\n def create_order(self, symbol, account_type, order_type, amount, price):\n check_symbol(symbol)\n check_should_not_none(account_type, \"account_type\")\n check_should_not_none(order_type, \"order_type\")\n check_should_not_none(amount, \"amount\")\n if order_type == OrderType.SELL_LIMIT \\\n or order_type == OrderType.BUY_LIMIT \\\n or order_type == OrderType.BUY_LIMIT_MAKER \\\n or order_type == OrderType.SELL_LIMIT_MAKER:\n check_should_not_none(price, \"price\")\n if order_type == OrderType.SELL_MARKET or order_type == OrderType.BUY_MARKET:\n check_should_none(price, \"price\")\n global account_info_map\n user = account_info_map.get_user(self.__api_key)\n account = user.get_account_by_type(account_type)\n source = \"api\"\n if account_type == AccountType.MARGIN:\n source = \"margin-api\"\n builder = UrlParamsBuilder()\n builder.put_post(\"account-id\", account.id)\n builder.put_post(\"amount\", amount)\n builder.put_post(\"price\", price)\n builder.put_post(\"symbol\", symbol)\n builder.put_post(\"type\", order_type)\n builder.put_post(\"source\", source)\n request = self.__create_request_by_post_with_signature(\"/v1/order/orders/place\", builder)\n\n def parse(json_wrapper):\n return json_wrapper.get_int(\"data\")\n\n request.json_parser = parse\n return request\n\n def get_open_orders(self, symbol, account_type, size=None, side=None):\n check_symbol(symbol)\n check_range(size, 1, 2000, \"size\")\n check_should_not_none(account_type, \"account_type\")\n global account_info_map\n user = account_info_map.get_user(self.__api_key)\n account = user.get_account_by_type(account_type)\n builder = UrlParamsBuilder()\n builder.put_url(\"account-id\", account.id)\n builder.put_url(\"symbol\", symbol)\n builder.put_url(\"side\", side)\n builder.put_url(\"size\", size)\n request = self.__create_request_by_get_with_signature(\"/v1/order/openOrders\", builder)\n\n def parse(json_wrapper):\n order_list = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n order = Order()\n order.order_id = item.get_int(\"id\")\n order.symbol = item.get_string(\"symbol\")\n order.price = 
item.get_float(\"price\")\n order.amount = item.get_float(\"amount\")\n order.account_type = account_info_map.get_account_by_id(self.__api_key,\n item.get_int(\"account-id\")).account_type\n order.created_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"created-at\"))\n order.order_type = item.get_string(\"type\")\n order.filled_amount = item.get_float(\"filled-amount\")\n order.filled_cash_amount = item.get_float(\"filled-cash-amount\")\n order.filled_fees = item.get_float(\"filled-fees\")\n order.source = item.get_string(\"source\")\n order.state = item.get_string(\"state\")\n order_list.append(order)\n return order_list\n\n request.json_parser = parse\n return request\n\n def cancel_order(self, symbol, order_id):\n check_symbol(symbol)\n check_should_not_none(order_id, \"order_id\")\n path = \"/v1/order/orders/{}/submitcancel\"\n path = path.format(order_id)\n request = self.__create_request_by_post_with_signature(path, UrlParamsBuilder())\n\n def parse(json_wrapper):\n return\n\n request.json_parser = parse\n return request\n\n def cancel_orders(self, symbol, order_id_list):\n check_symbol(symbol)\n check_should_not_none(order_id_list, \"order_id_list\")\n check_list(order_id_list, 1, 50, \"order_id_list\")\n string_list = list()\n for order_id in order_id_list:\n string_list.append(str(order_id))\n builder = UrlParamsBuilder()\n builder.put_post(\"order-ids\", string_list)\n request = self.__create_request_by_post_with_signature(\"/v1/order/orders/batchcancel\", builder)\n\n def parse(json_wrapper):\n return\n\n request.json_parser = parse\n return request\n\n def cancel_open_orders(self, symbol, account_type, side=None, size=None):\n check_symbol(symbol)\n check_should_not_none(account_type, \"account_type\")\n global account_info_map\n user = account_info_map.get_user(self.__api_key)\n account = user.get_account_by_type(account_type)\n builder = UrlParamsBuilder()\n builder.put_post(\"account-id\", account.id)\n builder.put_post(\"symbol\", symbol)\n builder.put_post(\"side\", side)\n builder.put_post(\"size\", size)\n request = self.__create_request_by_post_with_signature(\"/v1/order/orders/batchCancelOpenOrders\", builder)\n\n def parse(json_wrapper):\n data = json_wrapper.get_object(\"data\")\n batch_cancel_result = BatchCancelResult()\n batch_cancel_result.success_count = data.get_int(\"success-count\")\n batch_cancel_result.failed_count = data.get_int(\"failed-count\")\n return batch_cancel_result\n\n request.json_parser = parse\n return request\n\n def get_order(self, symbol, order_id):\n check_symbol(symbol)\n check_should_not_none(order_id, \"order_id\")\n path = \"/v1/order/orders/{}\"\n path = path.format(order_id)\n request = self.__create_request_by_get_with_signature(path, UrlParamsBuilder())\n\n def parse(json_wrapper):\n data = json_wrapper.get_object(\"data\")\n order = Order()\n order.order_id = data.get_int(\"id\")\n order.symbol = data.get_string(\"symbol\")\n order.price = data.get_float(\"price\")\n order.amount = data.get_float(\"amount\")\n order.account_type = account_info_map.get_account_by_id(self.__api_key,\n data.get_int(\"account-id\")).account_type\n order.created_timestamp = convert_cst_in_millisecond_to_utc(data.get_int(\"created-at\"))\n order.canceled_timestamp = convert_cst_in_millisecond_to_utc(data.get_int(\"canceled-at\"))\n order.finished_timestamp = convert_cst_in_millisecond_to_utc(data.get_int(\"finished-at\"))\n order.order_type = data.get_string(\"type\")\n order.filled_amount = data.get_float(\"field-amount\")\n 
order.filled_cash_amount = data.get_float(\"field-cash-amount\")\n order.filled_fees = data.get_float(\"field-fees\")\n order.source = data.get_string(\"source\")\n order.state = data.get_string(\"state\")\n return order\n\n request.json_parser = parse\n return request\n\n def get_match_results_by_order_id(self, symbol, order_id):\n check_symbol(symbol)\n check_should_not_none(order_id, \"order_id\")\n path = \"/v1/order/orders/{}/matchresults\"\n path = path.format(order_id)\n request = self.__create_request_by_get_with_signature(path, UrlParamsBuilder())\n\n def parse(json_wrapper):\n match_result_list = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n match_result = MatchResult()\n match_result.id = item.get_int(\"id\")\n match_result.created_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"created-at\"))\n match_result.filled_amount = item.get_float(\"filled-amount\")\n match_result.filled_fees = item.get_float(\"filled-fees\")\n match_result.match_id = item.get_int(\"match-id\")\n match_result.order_id = item.get_int(\"order-id\")\n match_result.price = item.get_float(\"price\")\n match_result.source = item.get_string(\"source\")\n match_result.symbol = item.get_string(\"symbol\")\n match_result.order_type = item.get_string(\"type\")\n match_result_list.append(match_result)\n return match_result_list\n\n request.json_parser = parse\n return request\n\n def get_match_results(self, symbol, order_type=None, start_date=None, end_date=None, size=None, from_id=None):\n check_symbol(symbol)\n start_date = format_date(start_date, \"start_date\")\n end_date = format_date(end_date, \"end_date\")\n check_range(size, 1, 100, \"size\")\n builder = UrlParamsBuilder()\n builder.put_url(\"symbol\", symbol)\n builder.put_url(\"types\", order_type)\n builder.put_url(\"start-date\", start_date)\n builder.put_url(\"end-date\", end_date)\n builder.put_url(\"from\", from_id)\n builder.put_url(\"size\", size)\n request = self.__create_request_by_get_with_signature(\"/v1/order/matchresults\", builder)\n\n def parse(json_wrapper):\n match_result_list = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n match_result = MatchResult()\n match_result.id = item.get_int(\"id\")\n match_result.created_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"created-at\"))\n match_result.filled_amount = item.get_float(\"filled-amount\")\n match_result.filled_fees = item.get_float(\"filled-fees\")\n match_result.match_id = item.get_int(\"match-id\")\n match_result.order_id = item.get_int(\"order-id\")\n match_result.price = item.get_float(\"price\")\n match_result.source = item.get_string(\"source\")\n match_result.symbol = item.get_string(\"symbol\")\n match_result.order_type = item.get_string(\"type\")\n match_result_list.append(match_result)\n return match_result_list\n\n request.json_parser = parse\n return request\n\n def withdraw(self, address, amount, currency, fee=None, address_tag=None):\n check_symbol(currency)\n check_should_not_none(address, \"address\")\n check_should_not_none(amount, \"amount\")\n builder = UrlParamsBuilder()\n builder.put_post(\"address\", address)\n builder.put_post(\"amount\", amount)\n builder.put_post(\"currency\", currency)\n builder.put_post(\"fee\", fee)\n builder.put_post(\"addr-tag\", address_tag)\n\n request = self.__create_request_by_post_with_signature(\"/v1/dw/withdraw/api/create\", builder)\n\n def parse(json_wrapper):\n return json_wrapper.get_int(\"data\")\n\n 
request.json_parser = parse\n return request\n\n def cancel_withdraw(self, currency, withdraw_id):\n check_symbol(currency)\n check_should_not_none(withdraw_id, \"withdraw_id\")\n path = \"/v1/dw/withdraw-virtual/{}/cancel\"\n path = path.format(withdraw_id)\n request = self.__create_request_by_post_with_signature(path, UrlParamsBuilder())\n\n def parse(json_wrapper):\n return\n\n request.json_parser = parse\n return request\n\n def get_historical_orders(self, symbol, order_state, order_type=None, start_date=None, end_date=None, start_id=None,\n size=None):\n check_symbol(symbol),\n check_should_not_none(order_state, \"order_state\")\n start_date = format_date(start_date, \"start_date\")\n end_date = format_date(end_date, \"end_date\")\n builder = UrlParamsBuilder()\n builder.put_url(\"symbol\", symbol)\n builder.put_url(\"types\", order_type)\n builder.put_url(\"start-date\", start_date)\n builder.put_url(\"end-date\", end_date)\n builder.put_url(\"from\", start_id)\n builder.put_url(\"states\", order_state)\n builder.put_url(\"size\", size)\n request = self.__create_request_by_get_with_signature(\"/v1/order/orders\", builder)\n\n def parse(json_wrapper):\n order_list = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n order = Order()\n order.account_type = account_info_map.get_account_by_id(self.__api_key,\n item.get_int(\"account-id\")).account_type\n order.amount = item.get_float(\"amount\")\n order.canceled_timestamp = convert_cst_in_millisecond_to_utc(item.get_int_or_default(\"canceled-at\", 0))\n order.finished_timestamp = convert_cst_in_millisecond_to_utc(item.get_int_or_default(\"finished-at\", 0))\n order.order_id = item.get_int(\"id\")\n order.symbol = item.get_string(\"symbol\")\n order.price = item.get_float(\"price\")\n order.created_timestamp = convert_cst_in_millisecond_to_utc(item.get_int(\"created-at\"))\n order.order_type = item.get_string(\"type\")\n order.filled_amount = item.get_float(\"field-amount\")\n order.filled_cash_amount = item.get_float(\"field-cash-amount\")\n order.filled_fees = item.get_float(\"field-fees\")\n order.source = item.get_string(\"source\")\n order.state = item.get_string(\"state\")\n order_list.append(order)\n return order_list\n\n request.json_parser = parse\n return request\n\n def transfer_between_parent_and_sub(self, sub_uid, currency, amount, transfer_type):\n check_currency(currency)\n check_should_not_none(sub_uid, \"sub_uid\")\n check_should_not_none(amount, \"amount\")\n check_should_not_none(transfer_type, \"transfer_type\")\n builder = UrlParamsBuilder()\n builder.put_post(\"sub-uid\", sub_uid)\n builder.put_post(\"amount\", amount)\n builder.put_post(\"currency\", currency)\n builder.put_post(\"type\", transfer_type)\n request = self.__create_request_by_post_with_signature(\"/v1/subuser/transfer\", builder)\n\n def parse(json_wrapper):\n return json_wrapper.get_int(\"data\")\n\n request.json_parser = parse\n return request\n\n def get_current_user_aggregated_balance(self):\n request = self.__create_request_by_get_with_signature(\"/v1/subuser/aggregate-balance\", UrlParamsBuilder())\n\n def parse(json_wrapper):\n balances = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items:\n balance = Balance()\n balance.currency = item.get_string(\"currency\")\n balance.balance = item.get_float(\"balance\")\n balances.append(balance)\n return balances\n\n request.json_parser = parse\n return request\n\n def get_specify_account_balance(self, sub_id):\n path = 
\"/v1/account/accounts/{}\"\n path = path.format(sub_id)\n request = self.__create_request_by_get_with_signature(path, UrlParamsBuilder())\n\n def parse(json_wrapper):\n complete_sub_account_list = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n complete_sub_account = CompleteSubAccountInfo()\n complete_sub_account.id = item.get_int(\"id\")\n complete_sub_account.account_type = item.get_string(\"type\")\n balances = list()\n data_array_in = item.get_array(\"list\")\n for item_in in data_array_in.get_items():\n balance = Balance()\n balance.currency = item_in.get_string(\"currency\")\n balance.type = item_in.get_string(\"type\")\n balance.balance = item_in.get_float(\"balance\")\n balances.append(balance)\n complete_sub_account.balances = balances\n complete_sub_account_list.append(complete_sub_account)\n return complete_sub_account_list\n\n request.json_parser = parse\n return request\n\n def get_etf_candlestick(self, symbol, interval, size=None):\n check_symbol(symbol)\n check_range(size, 1, 2000, \"size\")\n check_should_not_none(interval, \"interval\")\n builder = UrlParamsBuilder()\n builder.put_url(\"symbol\", symbol)\n builder.put_url(\"period\", interval)\n builder.put_url(\"limit\", size)\n request = self.__create_request_by_get(\"/quotation/market/history/kline\", builder)\n\n def parse(json_wrapper):\n candlestick_list = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n candlestick = Candlestick()\n candlestick.open = item.get_float(\"open\")\n candlestick.close = item.get_float(\"close\")\n candlestick.low = item.get_float(\"low\")\n candlestick.high = item.get_float(\"high\")\n candlestick.amount = item.get_float(\"amount\")\n candlestick.count = 0\n candlestick.volume = item.get_float(\"vol\")\n candlestick_list.append(candlestick)\n return candlestick_list\n\n request.json_parser = parse\n return request\n\n def get_etf_swap_config(self, etf_symbol):\n check_symbol(etf_symbol)\n builder = UrlParamsBuilder()\n builder.put_url(\"etf_name\", etf_symbol)\n request = self.__create_request_by_get(\"/etf/swap/config\", builder)\n\n def parse(json_wrapper):\n data = json_wrapper.get_object(\"data\")\n etf_swap_config = EtfSwapConfig()\n etf_swap_config.purchase_max_amount = data.get_int(\"purchase_max_amount\")\n etf_swap_config.purchase_min_amount = data.get_int(\"purchase_min_amount\")\n etf_swap_config.redemption_max_amount = data.get_int(\"redemption_max_amount\")\n etf_swap_config.redemption_min_amount = data.get_int(\"redemption_min_amount\")\n etf_swap_config.purchase_fee_rate = data.get_float(\"purchase_fee_rate\")\n etf_swap_config.redemption_fee_rate = data.get_float(\"redemption_fee_rate\")\n etf_swap_config.status = data.get_string(\"etf_status\")\n unit_price_data_array = data.get_array(\"unit_price\")\n unit_price_list = list()\n for item in unit_price_data_array.get_items():\n unit_price = UnitPrice()\n unit_price.currency = item.get_string(\"currency\")\n unit_price.amount = item.get_float(\"amount\")\n unit_price_list.append(unit_price)\n etf_swap_config.unit_price_list = unit_price_list\n return etf_swap_config\n\n request.json_parser = parse\n return request\n\n def etf_swap(self, etf_symbol, amount, swap_type):\n check_symbol(etf_symbol)\n check_should_not_none(amount, \"amount\")\n check_should_not_none(swap_type, \"swap_type\")\n builder = UrlParamsBuilder()\n builder.put_post(\"etf_name\", etf_symbol)\n builder.put_post(\"amount\", amount)\n if swap_type == 
EtfSwapType.IN:\n request = self.__create_request_by_post_with_signature(\"/etf/swap/in\", builder)\n else:\n request = self.__create_request_by_post_with_signature(\"/etf/swap/out\", builder)\n\n def parse():\n return\n\n request.json_parser = parse\n return request\n\n def get_etf_swap_history(self, etf_symbol, offset, size):\n check_symbol(etf_symbol)\n check_range(size, 1, 100, \"size\")\n greater_or_equal(offset, 0, \"offset\")\n builder = UrlParamsBuilder()\n builder.put_url(\"etf_name\", etf_symbol)\n builder.put_url(\"offset\", offset)\n builder.put_url(\"limit\", size)\n request = self.__create_request_by_get_with_signature(\"/etf/swap/list\", builder)\n\n def parse(json_wrapper):\n etf_swap_history_list = list()\n data_array = json_wrapper.get_array(\"data\")\n for item in data_array.get_items():\n etf_swap_history = EtfSwapHistory()\n etf_swap_history.created_timestamp = item.get_int(\"gmt_created\")\n etf_swap_history.currency = item.get_string(\"currency\")\n etf_swap_history.amount = item.get_float(\"amount\")\n etf_swap_history.type = item.get_string(\"type\")\n etf_swap_history.status = item.get_int(\"status\")\n detail = item.get_object(\"detail\")\n etf_swap_history.rate = detail.get_float(\"rate\")\n etf_swap_history.fee = detail.get_float(\"fee\")\n etf_swap_history.point_card_amount = detail.get_float(\"point_card_amount\")\n used_currency_array = detail.get_array(\"used_currency_list\")\n used_currency_list = list()\n for currency in used_currency_array.get_items():\n unit_price = UnitPrice()\n unit_price.amount = currency.get_float(\"amount\")\n unit_price.currency = currency.get_string(\"currency\")\n used_currency_list.append(unit_price)\n etf_swap_history.used_currency_list = used_currency_list\n obtain_currency_array = detail.get_array(\"obtain_currency_list\")\n obtain_currency_list = list()\n for currency in obtain_currency_array.get_items():\n unit_price = UnitPrice()\n unit_price.amount = currency.get_float(\"amount\")\n unit_price.currency = currency.get_string(\"currency\")\n obtain_currency_list.append(unit_price)\n etf_swap_history.obtain_currency_list = obtain_currency_list\n etf_swap_history_list.append(etf_swap_history)\n return etf_swap_history_list\n\n request.json_parser = parse\n return request\n\n def get_margin_balance_detail(self, symbol):\n check_symbol(symbol)\n builder = UrlParamsBuilder()\n builder.put_url(\"symbol\", symbol)\n request = self.__create_request_by_get_with_signature(\"/v1/margin/accounts/balance\", builder)\n\n def parse(json_wrapper):\n margin_balance_detail_list = list()\n data_array = json_wrapper.get_array(\"data\")\n for item_in_data in data_array.get_items():\n margin_balance_detail = MarginBalanceDetail()\n margin_balance_detail.id = item_in_data.get_int(\"id\")\n margin_balance_detail.type = item_in_data.get_string(\"type\")\n margin_balance_detail.symbol = item_in_data.get_string(\"symbol\")\n margin_balance_detail.state = item_in_data.get_string(\"state\")\n margin_balance_detail.fl_price = item_in_data.get_float(\"fl-price\")\n margin_balance_detail.fl_type = item_in_data.get_string(\"fl-type\")\n margin_balance_detail.risk_rate = item_in_data.get_float(\"risk-rate\")\n balance_list = list()\n list_array = item_in_data.get_array(\"list\")\n for item_in_list in list_array.get_items():\n balance = Balance()\n balance.currency = item_in_list.get_string(\"currency\")\n balance.balance_type = item_in_list.get_string(\"type\")\n balance.balance = item_in_list.get_float(\"balance\")\n balance_list.append(balance)\n 
margin_balance_detail.sub_account_balance_list = balance_list\n margin_balance_detail_list.append(margin_balance_detail)\n return margin_balance_detail_list\n\n request.json_parser = parse\n return request\n\n","repo_name":"jxu86/huobi_future","sub_path":"huobi_future/impl/restapirequestimpl.py","file_name":"restapirequestimpl.py","file_ext":"py","file_size_in_byte":42151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"41436306778","text":"from datenanalyse.formatting.typings import Simulation\n\nrhoLuft = 1.225 # [kg/m^3] Dichte von Luft stratoflights.com: 1.2050\nrhoHelium = 0.1855 # [kg/m^3] Dichte von Helium\n# VHelium = Simulation.heAmount # [m^3] im Ballon\npHelium = 101300 # [pa] am Boden\nTGas = 293.15 # [K] in der Flasche\nMHelium = 4*10**(-3) # [g/mol] Molare Masse Helium\nmBallon = 1.6 # [kg] Masse Ballon\n# mNutzlast = Simulation.mNutzlast # [kg] Masse Nutzlast\ncwWert = 0.25 # von stratoflights.com; CW = 0,45 für Kugel, könnte für den Ballon etwas geringer sein …\nH = 7238.3 # [m] von stratoflights.com oder 7990 laut Wikipedia, Wert für T = 15°C https://de.wikipedia.org/wiki/Barometrische_H%C3%B6henformel\nh = 100 # [m] Höhe Lützenkirchen\navogadroscheZahl = 6.0221415 * 10**23\nboltzmannkonstante = 1.3806503 * 10**-23 # [J*K^-1] aka k\n# NBallon = (VHelium * pHelium)/(boltzmannkonstante * TGas)\n# mHelium = NBallon * 6.6464731 * 10**-27\n# mges = mBallon + mNutzlast + mHelium # [kg] Gesamtmasse Ballon\nerdradius = 6371000 # [m] Erdradius auf Meereshöhe\ndeltat = 0.05 # [s]\nfitting = 0.87 # Faktor zur Berechn. von V\ngKonstante = 6.673 * 10**-11 # [m^3 * kg^-1 * s^-2] Gravitationskonstante\nmErde = 5.97 * 10**24 # [kg] Erdmasse","repo_name":"0x4c46/Datenauswertung-Stratosphaerenballonflug","sub_path":"lib/datenanalyse/meta/konstantenCSV.py","file_name":"konstantenCSV.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"21760492656","text":"import torch\n\n\ndef gram_matrix(x, normalize=True):\n (b, ch, h, w) = x.size()\n features = x.view(b, ch, w * h)\n features_t = features.transpose(1, 2)\n gram = features.bmm(features_t)\n if normalize:\n gram /= ch * h * w\n return gram\n\n\ndef get_smooth_loss(x):\n sl = torch.sum(torch.abs(x[:, :, :, :-1] - x[:, :, :, 1:])) + torch.sum(\n torch.abs(x[:, :, :-1, :] - x[:, :, 1:, :])) + torch.sum(\n torch.abs(x[:, :, :-1, :-1] - x[:, :, 1:, 1:])) + torch.sum(\n torch.abs(x[:, :, :-1, 1:] - x[:, :, 1:, :-1]))\n return sl\n\n\ndef compute_loss(vgg_model, optimizing_img, gt_features, content_feature_idx,\n style_feature_idx, config):\n content_gt_feature = gt_features[0]\n style_gt_feature = gt_features[1]\n\n current_feature = vgg_model(optimizing_img)\n\n content_feature = current_feature[content_feature_idx].squeeze(axis=0)\n content_loss = torch.nn.MSELoss(reduction='mean')(content_gt_feature,\n content_feature)\n\n style_loss = 0.0\n style_feature = [\n gram_matrix(x) for cnt, x in enumerate(current_feature)\n if cnt in style_feature_idx\n ]\n for gram_gt, gram_hat in zip(style_gt_feature, style_feature):\n style_loss += torch.nn.MSELoss(reduction='sum')(gram_gt[0],\n gram_hat[0])\n style_loss /= len(style_gt_feature)\n\n smooth_loss = get_smooth_loss(optimizing_img)\n\n total_loss = config['alpha'] * content_loss + config[\n 'beta'] * style_loss + config['gamma'] * smooth_loss\n\n return total_loss, content_loss, style_loss, smooth_loss\n","repo_name":"WillKen/Image-Style-Transfer-VGG","sub_path":"modules/loss_func.py","file_name":"loss_func.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"72379981607","text":"from .models import Task\nimport azure.ai.vision as sdk\nimport os\n\n\ndef visionPro(last_url,last_id):\n service_options = sdk.VisionServiceOptions(os.environ[\"VISION_ENDPOINT\"],\n os.environ[\"VISION_KEY\"])\n vision_source = sdk.VisionSource(\n url=last_url)\n analysis_options = sdk.ImageAnalysisOptions()\n analysis_options.features = (\n sdk.ImageAnalysisFeature.TEXT \n )\n\n analysis_options.language = \"en\"\n\n analysis_options.gender_neutral_caption = True\n\n image_analyzer = sdk.ImageAnalyzer(service_options, vision_source, analysis_options)\n\n result = image_analyzer.analyze()\n palabras=\"\"\n if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:\n for line in result.text.lines:\n palabras+=line.content\n\n last_task = Task.objects.get(id=last_id)\n last_task.texto = palabras\n last_task.save()\n\n\ndef ultimoUrl():\n last_task = Task.objects.latest('id')\n last_url = last_task.url\n last_id = last_task.id\n return last_url,last_id\n\n","repo_name":"jefryne/vision_pro_texto","sub_path":"task/newText.py","file_name":"newText.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30521502588","text":"\nimport ray\nimport os\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport sys\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\n# Import utilites\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\nfrom HelperFunctionParking import *\nfrom scipy.spatial import distance\nfrom itertools import takewhile\nimport multiprocessing as mp\n# Name of the directory containing the object detection module we're using\nMODEL_NAME = 'inference_graph'\n# Grab path to current working directory\nCWD_PATH = os.getcwd()\n\n# Path to frozen detection graph .pb file, which contains the model that is used\n# for object detection.\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\n\n# Path to label map file\nPATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')\n\n# Path to video\n# Number of classes the object detector can identify\nNUM_CLASSES = 6\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n# Load the Tensorflow model into memory.\n\n\n\n\n\n# Open video file\n\n\n\n#p = mp.Process(target=run)\n#p.start()\n\nnxt=1\ncollidedRecs = []\nindexes = []\nanyRecContain = False\ni = 0\nCL1=[]\nCL2=[]\nprevCL1={}\nMovingThreashold=15\nindex=0\ndef CalculateDiff(prevCL1):\n return distance.euclidean((prevCL1[0][0],prevCL1[0][1]),(prevCL1[len(prevCL1)-1][0],prevCL1[len(prevCL1)-1][1]))\n\n@ray.remote(num_gpus=0.38)\nclass TrainingActor(object):\n def __init__(self, seed):\n print('Set new seed:', seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n self.gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.38)\n #self.mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)\n\n # # Setting up the softmax architecture.\n # self.x = tf.placeholder('float', [None, 784])\n # W = tf.Variable(tf.zeros([784, 10]))\n # b = tf.Variable(tf.zeros([10]))\n # self.y = tf.nn.softmax(tf.matmul(self.x, W) + b)\n #\n # # Setting up the cost function.\n # self.y_ = tf.placeholder('float', [None, 10])\n # cross_entropy = -tf.reduce_sum(self.y_*tf.log(self.y))\n # self.train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n\n # Initialization\n\n self.init = tf.initialize_all_variables()\n self.sess1 = tf.Session(config=tf.ConfigProto(gpu_options=self.gpu_options))\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self.sess1 = tf.Session(config=tf.ConfigProto(gpu_options=self.gpu_options, inter_op_parallelism_threads=2,\n intra_op_parallelism_threads=2), graph=self.detection_graph)\n\n \n def train(self,path, name):\n\n video = cv2.VideoCapture(path)\n global gpu_options\n global detection_graph\n\n\n global index\n # Define input and output tensors (i.e. 
data) for the object detection classifier\n\n # Input tensor is the image\n image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n\n # Output tensors are the detection boxes, scores, and classes\n # Each box represents a part of the image where a particular object was detected\n detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n\n # Each score represents level of confidence for each of the objects.\n # The score is shown on the result image, together with the class label.\n detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n\n # Number of objects detected\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n while (video.isOpened()):\n print(name)\n\n ret, frame = video.read()\n frame = cv2.resize(frame,(720,480),frame, interpolation=cv2.INTER_AREA)\n\n # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\n # i.e. a single-column array, where each item in the column has the pixel RGB value\n\n # if index%2==0:\n frame_expanded = np.expand_dims(frame, axis=0)\n\n # Perform the actual detection by running the model with the image as input\n #\n\n (boxes, scores, classes, num) = self.sess1.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: frame_expanded})\n\n # Draw the results of the detection (aka 'visulaize the results')\n index = index + 1\n frame, boxes, labels = vis_util.visualize_boxes_and_labels_on_image_array(\n frame,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=2,\n min_score_thresh=0.70)\n # All the results have been drawn on the frame, so it's time to display it.\n labelsName = ['car', 'van', 'truck', 'bus', 'motorcycle', 'rickshaw']\n labelCount = []\n displayString = ''\n for label in labelsName: count = len(\n [x for x in list(labels.values()) if len(x) > 0 and label in x[0]]);labelCount.append(count);\n resizedBoxes = [(x[1] * frame.shape[1], x[0] * frame.shape[0], x[3] * frame.shape[1], x[2] * frame.shape[0])\n for\n x in boxes]\n # frame1 = cv2.resize(frame,None,fx=0.60, fy=0.60, interpolation=cv2.INTER_AREA)\n frame1 = frame\n i = 0\n TotalMoving=0\n\n for rect in resizedBoxes:\n showIndex = 0\n MovingStatus = \"Static\"\n\n if len(CL1) == 0:\n CL1.append(rect)\n prevCL1[len(CL1) - 1] = []\n showIndex = i\n i = i + 1\n indexes.append(i)\n\n\n else:\n index, found = CheckIOU(CL1, rect)\n if not found:\n CL1.append(rect)\n prevCL1[len(CL1) - 1] = []\n showIndex = i\n i = i + 1\n indexes.append(i)\n\n elif found:\n if len(prevCL1[index]) <= 10:\n prevCL1[index].append(rect)\n else:\n prevCL1[index].pop(0)\n prevCL1[index].append(rect)\n\n if (CalculateDiff(prevCL1[index]) > MovingThreashold):\n MovingStatus = \"Moving\"\n TotalMoving+=1\n\n CL1[index] = rect\n indexes.append(index)\n showIndex = index\n\n cv2.rectangle(frame1, (int(rect[0]), int(rect[1])), (int(rect[2]), int(rect[3])), color=(200, 200, 200),\n thickness=1)\n cv2.putText(frame1, str(showIndex), (int(rect[0]), int(rect[1]) - 10), 1, 1, (0, 0, 0))\n cv2.putText(frame1, str(MovingStatus), (int(rect[0]) + 10, int(rect[1] - 10) - 10), 1, 1, (0, 0, 0))\n\n cv2.rectangle(frame1, (0, 0), (150, 200), color=(200, 200, 200), thickness=-1)\n y = 5\n for i in range(len(labelsName)):\n y = y + 20\n displayString = labelsName[i] + ':' + 
str(labelCount[i])\n cv2.putText(frame1, displayString, (5, y), 1, 1, (0, 0, 0))\n cv2.putText(frame1, 'Moving: '+ str(TotalMoving), (5, y+15), 1, 1, (0, 0, 0))\n cv2.putText(frame1, 'Static: ' + str(len(resizedBoxes)-TotalMoving), (5, y + 35), 1, 1, (0, 0, 0))\n cv2.imshow(name, frame1)\n\n # Press 'q' to quit\n if cv2.waitKey(1) == ord('q'):\n break\n\n\n ############################################################33\n\n\n return path\nif __name__ == '__main__':\n # Start Ray.\n ray.init(num_gpus=1)\n path=[]\n name=[]\n path.append('VehicleSimulation.mp4')\n name.append('video1')\n path.append('LahoreSafeCity.mp4')\n name.append('video2')\n b=0\n\n # Create 3 actors.\n training_actors = [TrainingActor.remote(seed) for seed in range(2)]\n\n # Make them all train in parallel.\n #accuracy_ids = [actor.train.remote() for actor in training_actors]\n accuracy_ids = {i + 1: [training_actors[i].train.remote(path[i],name[i])] for i in range(2)}\n\n print(ray.get(accuracy_ids[1]))\n print(ray.get(accuracy_ids[2]))\n\n # Start new training runs in parallel.\n #accuracy_ids = [actor.train.remote('abc') for actor in training_actors]\n #print(ray.get(accuracy_ids))\n","repo_name":"CVML-KICS/CVML-20-1001","sub_path":"Code/RayFile.py","file_name":"RayFile.py","file_ext":"py","file_size_in_byte":9387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21246084000","text":"import random\nimport telebot\n\n# наш токен\ntoken = \"\"\n\n# переменная, в которой будут содержаться всем функции, которые нам нужны для обработки и ответа на сообщения\nbot = telebot.TeleBot(token)\n\nRANDOM_TASKS = [\"Сходить в магазин\", \"Посмотреть фильм\", \"Покормить кошку\", \"Помыть машину\", \"Пропылесосить\"]\n\nHELP = \"\"\"\n# /help - вывести список доступных команд\n# /start - приветствие\n# /add or /todo - добавить задачу в список (название задачи запрашиваем у пользователя)\n# /show - напечатать все добавленные задачи\n# /random - добавить случайную задачу на дату Сегодня\n# /exit - выход\n\"\"\"\n\ntasks = {}\n\n# функция для проверки и добавления даты(ключ) в словарь и задачи в список задач(значение) \ndef add_todo(date, task):\n date = date.lower()\n if date in tasks:\n # Дата есть в словаре\n # Добавляем в список задачу\n tasks[date].append(task)\n else:\n tasks[date] = [task]\n\n# регистрация функции и сама функция команды \"help\"\n@bot.message_handler(commands=[\"help\"])\ndef help(message):\n bot.send_message(message.chat.id, HELP)\n\n# регистрация функции и сама функция команды \"start\"\n@bot.message_handler(commands=[\"start\"])\ndef help(message):\n bot.send_message(message.chat.id,\"Привет ✌️ \")\n bot.send_message(message.chat.id,\"Планирование -наше всё, приступим? ✍🏻\")\n\n# регистрация функции и сама функция команды \"add\"\n@bot.message_handler(commands=[\"add\", \"todo\"])\ndef add(message):\n command = message.text.split(maxsplit=2)\n date = command[1]\n task = command[2].lower()\n if len(task) < 3:\n bot.send_message(message.chat.id, \"Задачи должны быть больше 3-х символов\")\n else:\n add_todo(date, task)\n bot.send_message(message.chat.id, f\"Задача {task} добавлена на дату {date}\")\n\n# регистрация функции и сама функция команды \"random\"\n@bot.message_handler(commands=[\"random\"])\ndef random_add(message):\n date = \"сегодня\"\n task = random.choice(RANDOM_TASKS)\n add_todo(\"сегодня\", task)\n bot.send_message(message.chat.id, f\"Задача {task} добавлена на сегодня\")\n\n# регистрация функции и сама функция команды \"show\"\n@bot.message_handler(commands=[\"show\"])\ndef show(message):\n command = message.text.split(maxsplit=1)\n date = command[1]\n text = \"\"\n if date in tasks:\n text = date.upper() + \"\\n\"\n for task in tasks[date]:\n text = text + \"[] \" + task + \"\\n\"\n else:\n text = \"Задач на эту дату нет\"\n bot.send_message(message.chat.id, text)\n\n# регистрация функции и сама функция команды \"exit\"\n@bot.message_handler(commands=[\"exit\"])\ndef exit(message):\n command = message.text\n bot.send_message(message.chat.id, \"До новых встреч!\")\n\n\n# постоянно обращается к серверам телеграм, long_polling\nbot.polling(none_stop=True)\n\n\n\n\n","repo_name":"bugab0o/telebot","sub_path":"nlbot.py","file_name":"nlbot.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"43291755729","text":"from openpyxl import load_workbook\nfrom obp_client import logger\nfrom customers import create_customer_from_row\nfrom accounts import create_account_with_customer_links_from_row\nfrom transactions import create_transaction_from_row\nfrom os import getcwd\n\n\ndata_source = f'{getcwd()}/resources/obp-data-import.xlsx'\nwb = load_workbook(data_source)\ncustomers = wb['Customers']\naccounts = wb['Accounts']\ntransactions = wb['Transactions']\n\n\nfor row in customers.iter_rows(min_row=2, max_col=customers.max_column, max_row=customers.max_row):\n\tif row[0].value is None:\n\t\tcontinue\n\tlogger.debug(f\"start customer: {row[0].row}\")\n\tcreate_customer_from_row(row)\n\nfor row in accounts.iter_rows(min_row=2, max_col=accounts.max_column, max_row=accounts.max_row):\n\tif row[0].value is None:\n\t\tcontinue\n\tlogger.debug(f\"start account: {row[0].row}\")\n\tcreate_account_with_customer_links_from_row(row)\n\nfor row in transactions.iter_rows(min_row=2, max_col=transactions.max_column, max_row=transactions.max_row):\n\tif row[13].value is None:\n\t\tcontinue\n\tlogger.debug(f\"start transaction: {row[0].row}\")\n\tcreate_transaction_from_row(row)\n\n","repo_name":"OpenBankProject/pk-data-import","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3639541793","text":"from django.shortcuts import render\nfrom .models import *\n# Create your views here.\ndef cart(request):\n\t# 从session中获取当前用户的id\n\tuid = request.session.get('user_id')\n\t# 根据id搜索当前用户放入购物车的品种和数量\n\tcarts = CartInfo.objects.filter(user_id = 1)\n\tcontext = {'title':'购物车',\n\t\t\t 'page_name':1,\n\t\t\t 'carts':carts}\n\treturn render(request, 'df_cart/cart.html', context)","repo_name":"taotongya/taotongya.github.io","sub_path":"df_cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1018752697","text":"import pandas as pd\nfrom email.mime.text import MIMEText\nimport smtplib\nimport sys\nimport os\n\ndef check_job_card_urls():\n msg = 'Checking the list of job card urls...\\r\\n'\n\n if os.path.isfile(output_dir+'/job_card_urls.csv') and os.path.getsize(output_dir+'/job_card_urls.csv') > 0:\n job_card_urls = pd.read_csv(output_dir+'/job_card_urls.csv',\n header=None,\n names=['job_card', 'url'])\n else:\n job_card_urls = pd.DataFrame({'job_card': [], 'url': []})\n\n if os.path.isfile(gp_file) and os.path.getsize(gp_file) > 0:\n gp_list = pd.read_csv(gp_file,\n names=['district_name', 'block_name', 'panchayat_name', 'panchayat_code'],\n usecols=['district_name', 'block_name', 'panchayat_name', 'panchayat_code'],\n dtype={'panchayat_code': object})\n else:\n sys.exit('GP input file not found or empty')\n\n job_card_urls['panchayat_code'] = job_card_urls.url.apply(lambda x: x.split('panchayat_code=')[1].split('&')[0])\n job_card_urls = job_card_urls[['panchayat_code', 'job_card']].drop_duplicates().groupby(['panchayat_code']).count().reset_index()\n\n job_card_counts = pd.merge(gp_list[['panchayat_code']], job_card_urls, how='left', on='panchayat_code')\n\n job_card_counts = job_card_counts.fillna(0)\n\n if len(job_card_counts[job_card_counts.job_card == 0].index) == 0:\n msg += 'List of job card urls was populated for all panchayats\\r\\n'\n else:\n msg += 'WARNING: list of job card urls doesn\\'t contain all the study panchayats\\r\\nNeed to restart the scrape\\r\\n\\r\\n'\n msg += job_card_counts[job_card_counts.job_card == 0].to_string()\n msg += '\\r\\n'\n\n msg += '\\r\\n'\n\n return msg\n\n\ndef check_job_card_scrape():\n msg = 'Checking the progress of the job card scrape against the list of job card urls...\\r\\n'\n\n if os.path.isfile(output_dir+'/jobcard.csv') and os.path.getsize(output_dir+'/jobcard.csv') > 0:\n jobcards = pd.read_csv(output_dir+'/jobcard.csv', encoding='utf-8', usecols=['job_card_number'], dtype={'job_card_number': object})\n jobcards = jobcards[jobcards['job_card_number'] != 'job_card_number'] # Headers get appended every time the scraper runs\n else:\n jobcards = pd.DataFrame({'job_card_number': []}, dtype=object)\n\n if os.path.isfile(output_dir+'/job_card_urls.csv') and os.path.getsize(output_dir+'/job_card_urls.csv') > 0:\n job_card_urls = pd.read_csv(output_dir+'/job_card_urls.csv', header=None, names=['job_card', 'url'])\n else:\n job_card_urls = pd.DataFrame({'job_card': [], 'url': []})\n\n jc_df = pd.merge(job_card_urls, jobcards.drop_duplicates(), how='left', left_on='job_card', right_on='job_card_number')\n\n jc_notscraped_df = jc_df[pd.isnull(jc_df.job_card_number)][['job_card', 'url']]\n\n if len(jc_notscraped_df.index) == 0:\n jc_total = len(jc_df.index)\n msg += 'All {} of the job cards have been scraped\\r\\n'.format(jc_total)\n else:\n jc_total = len(jc_df.index)\n jc_scraped = jc_total - len(jc_notscraped_df.index)\n jc_pct = (float(jc_scraped)/float(jc_total))*100\n msg += '{} of {} job cards have been scraped ({:.1f}%)\\r\\n'.format(jc_scraped, jc_total, jc_pct)\n\n msg += '\\r\\n'\n return msg\n\ndef check_muster_scrape():\n msg = 'Checking the progress of the muster roll scrape against the list of encountered muster urls...\\r\\n'\n msg += 'Note: list of encountered muster roll urls is populated from the job card pages and will grow until all job cards are scraped\\r\\n'\n\n if os.path.isfile(output_dir+'/muster.csv') and os.path.getsize(output_dir+'/muster.csv') > 0:\n musters = 
pd.read_csv(output_dir+'/muster.csv', encoding='utf-8', usecols=['work_code', 'msr_no'], dtype={'work_code': object, 'msr_no': object})\n musters = musters[musters.work_code != 'work_code'] # when the script restarts it puts in an extra header row\n else:\n musters = pd.DataFrame({'work_code': [], 'msr_no': []}, dtype=object)\n\n musters['right'] = 1\n\n # Find all the musters that haven't been scraped\n if os.path.isfile(output_dir+'/encountered_muster_links.csv'):\n encountered_muster_links = pd.read_csv(output_dir+'/encountered_muster_links.csv',\n header=None,\n names=['job_card', 'url', 'msr_no', 'muster_url', 'work_code'],\n usecols=['msr_no', 'work_code', 'muster_url'],\n encoding='utf-8',\n dtype={'work_code': object, 'msr_no': object, 'muster_url': object})\n else:\n encountered_muster_links = pd.DataFrame({'msr_no': [], 'muster_url': [], 'work_code': []}, dtype=object)\n\n mr_df = pd.merge(encountered_muster_links, musters.drop_duplicates(), how='left', on=['msr_no', 'work_code'])\n\n mr_notscraped_df = mr_df[pd.isnull(mr_df.right)] # keep the musters that haven't been scraped yet\n\n if len(mr_notscraped_df.index) == 0:\n mr_total = len(mr_df.index)\n msg += 'All {} of the encountered muster roll urls have been scraped\\r\\n'.format(mr_total)\n else:\n mr_total = len(mr_df.index)\n mr_scraped = mr_total - len(mr_notscraped_df.index)\n mr_pct = (float(mr_scraped)/float(mr_total))*100\n msg += '{} of {} encountered muster roll urls have been scraped ({:.1f}%)\\r\\n'.format(mr_scraped, mr_total, mr_pct)\n\n msg += '\\r\\n'\n\n return msg\n\ndef send_email(email_recipients, msg_string):\n s = smtplib.SMTP('smtp.mailgun.org', 587)\n #with open('password.txt', 'r') as f:\n\t#password = f.read()\n #password = '9db9446b166c39ebb2fda2bcb2293b3b-07e45e2a-6dbd4c5a'\n s.login('postmaster@sandboxdf8537b17f3745c58b9d87370ed2eedd.mailgun.org', password)\n msg = MIMEText(msg_string)\n msg['Subject'] = 'FBA Scrape Progress'\n msg['From'] = 'FBA Progress Tracker '\n msg['To'] = ','.join(email_recipients)\n s.sendmail('postmaster@sandboxdf8537b17f3745c58b9d87370ed2eedd.mailgun.org', email_recipients, msg.as_string())\n s.quit()\n\nif __name__ == '__main__':\n\n input_dir = './input'\n output_dir = './full_output'\n\n gp_file = input_dir + '/gp_list.csv'\n\n email_recipients = [\n 'michael.y.chang@yale.edu',\n ]\n\n msg_string = ''\n\n msg_string += check_job_card_urls()\n msg_string += check_job_card_scrape()\n msg_string += check_muster_scrape()\n\n # print msg_string\n send_email(email_recipients, msg_string)\n","repo_name":"hks-epod/fba-full-scrape","sub_path":"check_progress.py","file_name":"check_progress.py","file_ext":"py","file_size_in_byte":6717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"71600805607","text":"# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\n\n\ndef dense_layer(inputs, kernel_size, num_filters, keep_prob, scope, regularizer=None):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n input_size = inputs.get_shape()[-1] # [batch_size, max_len, input_size]\n W = tf.get_variable(\"W\", shape=[kernel_size, input_size, num_filters], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer(), regularizer=regularizer)\n b = tf.get_variable(\"b\", shape=[num_filters], dtype=tf.float32,\n initializer=tf.zeros_initializer(), regularizer=regularizer)\n outputs = tf.nn.conv1d(inputs, W, 1, padding='SAME') + b\n outputs = tf.nn.leaky_relu(outputs)\n outputs = tf.nn.dropout(outputs, keep_prob=keep_prob) # [batch_size, max_len, num_filters]\n return outputs\n\ndef fc_layer(inputs, output_dim, keep_prob, scope, regularizer=None):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n input_size = inputs.get_shape()[-1]\n W = tf.get_variable(\"W\", shape=[input_size, output_dim], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer(), regularizer=regularizer)\n b = tf.get_variable(\"b\", shape=[output_dim], dtype=tf.float32,\n initializer=tf.zeros_initializer(), regularizer=regularizer)\n\n if inputs.shape.ndims == 3:\n outputs = tf.einsum(\"abc,cd->abd\", inputs, W) + b\n else:\n outputs = tf.matmul(inputs, W) + b\n outputs = tf.nn.leaky_relu(outputs)\n outputs = tf.nn.dropout(outputs, keep_prob)\n return outputs, W\n\ndef get_masked_weights(inputs, seq_len, max_len):\n seq_mask = tf.sequence_mask(seq_len, max_len, dtype=tf.float32) # [batch_size, max_len]\n seq_mask = tf.expand_dims(seq_mask, 1) # [batch_size, 1, max_len]\n outputs = inputs * seq_mask + (seq_mask - 1) * 1e9\n outputs = tf.nn.softmax(outputs, axis=-1)\n return outputs\n\n\nclass DSA(object):\n def __init__(self, max_len_left, max_len_right, vocab_size,\n embedding_size, num_hidden,\n d_1, d_l, k_1, k_2, num_layers, d_c,\n num_attentions, d_o, num_iter, mu=1e-2, l2_reg_lambda=0.0):\n\n regularizer = layers.l2_regularizer(l2_reg_lambda)\n\n # placeholder for input data\n self.input_left = tf.placeholder(tf.int32, shape=[None, max_len_left],\n name=\"input_left\")\n self.input_right = tf.placeholder(tf.int32, shape=[None, max_len_right],\n name=\"input_right\")\n self.input_y = tf.placeholder(tf.float32, shape=[None, 2],\n name=\"input_y\")\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n\n with tf.name_scope(\"embedding\"):\n self.embedding_weight = tf.get_variable(\"embedding_weight\",\n shape=[vocab_size, embedding_size],\n dtype=tf.float32,\n initializer=tf.truncated_normal_initializer())\n self.emb_left = tf.nn.embedding_lookup(self.embedding_weight, self.input_left, name=\"emb_left\")\n self.emb_right = tf.nn.embedding_lookup(self.embedding_weight, self.input_right, name=\"emb_right\")\n\n self.length_left = self.get_length(self.input_left)\n self.length_right = self.get_length(self.input_right)\n\n with tf.name_scope('dense_layers'):\n X_1_left = dense_layer(self.emb_left, 1, d_1, self.dropout_keep_prob, 'dense_layer_1', regularizer)\n X_1_right = dense_layer(self.emb_right, 1, d_1, self.dropout_keep_prob, 'dense_layer_1', regularizer)\n\n k_size_list = [k_1, k_2]\n layer_outputs_left = [[X_1_left], [X_1_left]]\n layer_outputs_right = [[X_1_right], [X_1_right]]\n for k in range(2):\n for l in range(2, num_layers + 1):\n temp_inputs_left = tf.concat(layer_outputs_left[k], axis=-1)\n 
temp_inputs_right = tf.concat(layer_outputs_right[k], axis=-1)\n\n X_i_left = dense_layer(temp_inputs_left, k_size_list[k], d_l, self.dropout_keep_prob,\n 'dense_layer_{}_{}'.format(k, l), regularizer)\n X_i_right = dense_layer(temp_inputs_right, k_size_list[k], d_l, self.dropout_keep_prob,\n 'dense_layer_{}_{}'.format(k, l), regularizer)\n\n layer_outputs_left[k].append(X_i_left)\n layer_outputs_right[k].append(X_i_right)\n\n concat_outputs_left = [self.emb_left] + layer_outputs_left[0] + layer_outputs_left[1]\n concat_outputs_right = [self.emb_right] + layer_outputs_right[0] + layer_outputs_right[1]\n\n self.X_c_left = dense_layer(tf.concat(concat_outputs_left, -1), 1, d_c, self.dropout_keep_prob,\n 'dense_layer_c', regularizer)\n self.X_c_right = dense_layer(tf.concat(concat_outputs_right, -1), 1, d_c, self.dropout_keep_prob,\n 'dense_layer_c', regularizer)\n\n\n with tf.name_scope('dynamic_self_attention'):\n Z_left = []\n Z_right = []\n W_j = []\n for j in range(num_attentions):\n X_hat_left, W = fc_layer(self.X_c_left, d_o, 1.0, 'dsa_{}'.format(j), regularizer)\n X_hat_right, _ = fc_layer(self.X_c_right, d_o, 1.0, 'dsa_{}'.format(j), regularizer)\n\n q_left = tf.zeros(shape=[tf.shape(X_hat_left)[0], 1, max_len_left], dtype=tf.float32)\n q_right = tf.zeros(shape=[tf.shape(X_hat_right)[0], 1, max_len_right], dtype=tf.float32)\n\n for r in range(num_iter):\n a_left = get_masked_weights(q_left, self.length_left, max_len_left)\n s_left = tf.matmul(a_left, X_hat_left) # [batch_size, 1, d_o]\n z_left = tf.nn.tanh(s_left)\n\n a_right = get_masked_weights(q_right, self.length_right, max_len_right)\n s_right = tf.matmul(a_right, X_hat_right) # [batch_size, 1, d_o]\n z_right = tf.nn.tanh(s_right)\n\n if r == num_iter-1:\n Z_left.append(tf.reshape(z_left, shape=[-1, d_o]))\n Z_right.append(tf.reshape(z_right, shape=[-1, d_o]))\n # for visualize\n att_left = tf.identity(a_left, name='attention_left')\n att_right = tf.identity(a_right, name='attention_right')\n\n X_left_temp = X_hat_left / tf.sqrt(tf.reduce_sum(tf.square(X_hat_left), axis=-1, keepdims=True))\n z_left_temp = z_left / tf.sqrt(tf.reduce_sum(tf.square(z_left), axis=-1, keepdims=True))\n X_right_temp = X_hat_right / tf.sqrt(tf.reduce_sum(tf.square(X_hat_right), axis=-1, keepdims=True))\n z_right_temp = z_right / tf.sqrt(tf.reduce_sum(tf.square(z_right), axis=-1, keepdims=True))\n\n q_left = q_left + tf.matmul(z_left_temp, tf.transpose(X_left_temp, [0, 2, 1]))\n q_right = q_right + tf.matmul(z_right_temp, tf.transpose(X_right_temp, [0, 2, 1]))\n\n W_j.append(W)\n\n\n with tf.name_scope('penalization'):\n self.penalty = 0.0\n for i in range(num_attentions):\n for j in range(i+1, num_attentions):\n self.penalty += tf.nn.relu(1 - tf.square(tf.norm(W_j[i]-W_j[j], ord='fro', axis=[0, 1])))\n\n\n with tf.name_scope('mlp_layer'):\n self.V_left = tf.concat(Z_left, axis=-1)\n self.V_right = tf.concat(Z_right, axis=-1)\n self.V = tf.concat([self.V_left, self.V_right, tf.abs(self.V_left-self.V_right),\n tf.multiply(self.V_left, self.V_right)], axis=-1)\n\n output, _ = fc_layer(self.V, num_hidden, self.dropout_keep_prob, 'fc_1', regularizer=regularizer)\n # has a shortcut connection\n self.full_out, _ = fc_layer(tf.concat([self.V, output], axis=-1), num_hidden, self.dropout_keep_prob, 'fc_2',\n regularizer=regularizer)\n\n with tf.name_scope(\"output\"):\n W = tf.get_variable(\n \"W_output\",\n shape=[num_hidden, 2], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer(), regularizer=regularizer)\n b = tf.get_variable(\"b_output\", 
shape=[2], dtype=tf.float32, initializer=tf.constant_initializer(0.1),\n regularizer= regularizer)\n self.scores = tf.nn.xw_plus_b(self.full_out, W, b, name=\"scores\")\n self.predictions = tf.argmax(self.scores, 1, name=\"predictions\")\n\n # CalculateMean cross-entropy loss\n with tf.name_scope(\"loss\"):\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)\n self.loss = tf.reduce_mean(losses) + mu * self.penalty + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n\n # Accuracy\n with tf.name_scope(\"accuracy\"):\n correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name=\"accuracy\")\n\n\n @staticmethod\n def get_length(x):\n x_sign = tf.sign(tf.abs(x))\n length = tf.reduce_sum(x_sign, axis=1)\n return tf.cast(length, tf.int32)\n\nif __name__ == '__main__':\n a = tf.get_variable('a', shape=[2, 3, 4], dtype=tf.float32, initializer=tf.truncated_normal_initializer())\n b = tf.get_variable('b', shape=[2, 1, 4], dtype=tf.float32, initializer=tf.truncated_normal_initializer())\n\n a = a/tf.sqrt(tf.reduce_sum(tf.square(a), axis=-1, keepdims=True))\n b = b/tf.sqrt(tf.reduce_sum(tf.square(b), axis=-1, keepdims=True))\n\n c = tf.matmul(b, tf.transpose(a, [0, 2, 1]))\n d = tf.expand_dims(tf.reduce_sum(tf.multiply(a, b), axis=-1), 1)\n\n e = tf.constant([1, 1, 2**0.5], dtype=tf.float32)\n f = e/tf.sqrt(tf.reduce_sum(tf.square(e), axis=-1, keepdims=True))\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n c_, d_, f_ = sess.run([c, d, f])\n print(c_)\n print(d_)\n print(f_)","repo_name":"zhongbin1/DeepMatching","sub_path":"models/DSA.py","file_name":"DSA.py","file_ext":"py","file_size_in_byte":10571,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"71254897447","text":"# %%\nimport torch.nn as nn\nimport torch\nfrom torch.nn.modules import padding\nfrom torch.nn.modules.conv import Conv1d\nfrom torch.utils.data import DataLoader, Dataset\nimport torch.nn.functional as F\n\nclass Conv1dReLU(nn.Module):\n '''\n kernel_size=3, stride=1, padding=1\n kernel_size=5, stride=1, padding=2\n kernel_size=7, stride=1, padding=3\n '''\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):\n super().__init__()\n self.inc = nn.Sequential(\n nn.Conv1d(in_channels=in_channels, out_channels=out_channels,kernel_size=kernel_size, stride=stride, padding=padding),\n nn.ReLU()\n )\n \n def forward(self, x):\n\n return self.inc(x)\n\nclass LinearReLU(nn.Module):\n def __init__(self,in_features, out_features, bias=True):\n super().__init__()\n self.inc = nn.Sequential(\n nn.Linear(in_features=in_features, out_features=out_features, bias=bias),\n nn.ReLU()\n )\n\n def forward(self, x):\n \n return self.inc(x)\n\nclass BilinearPooling(nn.Module):\n def __init__(self, in_channels, out_channels, c_m, c_n):\n super().__init__()\n\n self.convA = nn.Conv1d(in_channels, c_m, kernel_size=1, stride=1, padding=0)\n self.convB = nn.Conv1d(in_channels, c_n, kernel_size=1, stride=1, padding=0)\n self.linear = nn.Linear(c_m, out_channels, bias=True)\n\n def forward(self, x):\n '''\n x: (batch, channels, seq_len)\n A: (batch, c_m, seq_len)\n B: (batch, c_n, seq_len)\n att_maps.permute(0, 2, 1): (batch, seq_len, c_n)\n global_descriptors: (batch, c_m, c_n)\n ''' \n A = self.convA(x) \n B = self.convB(x)\n att_maps = F.softmax(B, dim=-1)\n global_descriptors = torch.bmm(A, att_maps.permute(0, 2, 1))\n global_descriptor = torch.mean(global_descriptors, dim=-1)\n out = self.linear(global_descriptor).unsqueeze(1)\n\n return out\n\nclass MutualAttentation(nn.Module):\n def __init__(self, in_channels, att_size, c_m, c_n):\n super().__init__()\n self.bipool = BilinearPooling(in_channels, in_channels, c_m, c_n)\n self.linearS = nn.Linear(in_channels, att_size)\n self.linearT = nn.Linear(in_channels, att_size)\n \n def forward(self, source, target):\n '''\n source: (batch, channels, seq_len)\n target: (batch, channels, seq_len)\n global_descriptor: (batch, 1, channels)\n '''\n global_descriptor = self.bipool(source)\n target_org = target\n target = self.linearT(target.permute(0, 2, 1)).permute(0, 2, 1)\n global_descriptor = self.linearS(global_descriptor)\n att_maps = torch.bmm(global_descriptor, target)\n att_maps = F.sigmoid(att_maps)\n out_target = torch.add(target_org, torch.mul(target_org, att_maps))\n out_target = F.relu(out_target)\n\n return out_target\n\n\nclass DTImodel(nn.Module):\n def __init__(self, vocab_prot_size, vocab_drug_size, embedding_size=128, filter_num=32):\n super().__init__()\n prot_filter_size = [4, 8, 12]\n drug_filter_size = [4, 6, 8]\n\n self.prot_embed = nn.Embedding(vocab_prot_size, embedding_size, padding_idx=0)\n self.prot_conv1 = Conv1dReLU(embedding_size, filter_num, prot_filter_size[0])\n self.prot_conv2 = Conv1dReLU(filter_num, filter_num * 2, prot_filter_size[1])\n self.prot_conv3 = Conv1dReLU(filter_num * 2, filter_num * 3, prot_filter_size[2])\n self.prot_pool = nn.AdaptiveMaxPool1d(1)\n\n self.drug_embed = nn.Embedding(vocab_drug_size, embedding_size, padding_idx=0)\n self.drug_conv1 = Conv1dReLU(embedding_size, filter_num, drug_filter_size[0])\n self.drug_conv2 = Conv1dReLU(filter_num, filter_num * 2, drug_filter_size[1])\n self.drug_conv3 = Conv1dReLU(filter_num * 2, filter_num * 3, 
drug_filter_size[2])\n self.drug_pool = nn.AdaptiveMaxPool1d(1)\n\n self.prot_mut_att1 = MutualAttentation(filter_num, filter_num, filter_num, 8)\n self.drug_mut_att1 = MutualAttentation(filter_num, filter_num, filter_num, 8)\n\n self.prot_mut_att2 = MutualAttentation(filter_num*2, filter_num, filter_num, 8)\n self.drug_mut_att2 = MutualAttentation(filter_num*2, filter_num, filter_num, 8)\n\n self.prot_mut_att3 = MutualAttentation(filter_num*3, filter_num, filter_num, 8)\n self.drug_mut_att3 = MutualAttentation(filter_num*3, filter_num, filter_num, 8)\n\n self.linear1 = LinearReLU(filter_num * 3 * 2, 1024)\n self.drop1 = nn.Dropout(0.1)\n self.linear2 = LinearReLU(1024, 1024)\n self.drop2 = nn.Dropout(0.1)\n self.linear3 = LinearReLU(1024, 512)\n self.drop3 = nn.Dropout(0.1)\n self.out_layer = nn.Linear(512, 1)\n\n def forward(self, prot_x, drug_x):\n prot_x = self.prot_embed(prot_x).permute(0, 2, 1)\n drug_x = self.drug_embed(drug_x).permute(0, 2, 1)\n\n prot_x = self.prot_conv1(prot_x)\n drug_x = self.drug_conv1(drug_x)\n prot_x_g = self.prot_mut_att1(drug_x, prot_x)\n drug_x_g = self.drug_mut_att1(prot_x, drug_x)\n\n prot_x = self.prot_conv2(prot_x_g)\n drug_x = self.drug_conv2(drug_x_g)\n prot_x_g = self.prot_mut_att2(drug_x, prot_x)\n drug_x_g = self.drug_mut_att2(prot_x, drug_x)\n\n prot_x = self.prot_conv3(prot_x_g)\n drug_x = self.drug_conv3(drug_x_g)\n prot_x_g = self.prot_mut_att3(drug_x, prot_x)\n drug_x_g = self.drug_mut_att3(prot_x, drug_x)\n\n prot_x = self.prot_pool(prot_x_g).squeeze(-1)\n drug_x = self.drug_pool(drug_x_g).squeeze(-1)\n\n x = torch.cat([prot_x, drug_x], dim=-1)\n x = self.linear1(x)\n x = self.drop1(x)\n x = self.linear2(x)\n x = self.drop2(x)\n x = self.linear3(x)\n x = self.drop3(x)\n x = self.out_layer(x)\n\n return x\n\n# %%\nif __name__ == \"__main__\":\n protein_x = torch.randint(0, 20, (1, 1200))\n ligand_x = torch.randint(0, 65, (1, 64)) \n net = DTImodel(20+1, 65+1)\n res = net(protein_x, ligand_x)\n print(res.shape)\n\n# %%","repo_name":"guaguabujianle/ML-DTI","sub_path":"network/ML_DTI.py","file_name":"ML_DTI.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"25794233734","text":"import cv2\nimport numpy as np\n\nimagemOriginal = cv2.imread(\"Train_1.jpg\", 0)\n\nelementoEstruturante = cv2.getStructuringElement(\n cv2.MORPH_ELLIPSE, (25,25)\n)\n\nimagemProcessada = cv2.morphologyEx(\n imagemOriginal, cv2.MORPH_TOPHAT, elementoEstruturante\n)\n\nimagemTratada = cv2.add(imagemProcessada, imagemProcessada)\n\ncv2.imshow(\"Original\", imagemOriginal)\ncv2.imshow(\"Resultado\", imagemProcessada)\ncv2.imshow(\"Final\", imagemTratada)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"ThayDias/visao-computacional","sub_path":"Introducao a Visao Computacional/8. Operacoes Morfologicas/topHat.py","file_name":"topHat.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"31517956597","text":"from abc import ABC, abstractmethod\r\n\r\nfrom project.baked_food.baked_food import BakedFood\r\nfrom project.drink.drink import Drink\r\nfrom project.tools.validator.validator import Validator\r\n\r\n\r\nclass Table(ABC):\r\n @abstractmethod\r\n def __init__(self, table_number: int, capacity: int):\r\n self.table_number = table_number\r\n self.capacity = capacity\r\n self.food_orders = []\r\n self.drink_orders = []\r\n self.number_of_people = 0\r\n self.is_reserved = False\r\n\r\n @property\r\n def capacity(self):\r\n return self.__capacity\r\n\r\n @capacity.setter\r\n def capacity(self, value):\r\n Validator.check_if_value_is_0_or_below(value, \"Capacity has to be greater than 0!\")\r\n self.__capacity = value\r\n\r\n def reserve(self, number_of_people: int):\r\n self.is_reserved = True\r\n self.number_of_people = number_of_people\r\n\r\n def order_food(self, food: BakedFood):\r\n self.food_orders.append(food)\r\n\r\n def order_drink(self, drink: Drink):\r\n self.drink_orders.append(drink)\r\n\r\n def get_bill(self):\r\n bill_total = 0\r\n for food in self.food_orders:\r\n bill_total += food.price\r\n for drink in self.drink_orders:\r\n bill_total += drink.price\r\n return bill_total\r\n\r\n def clear(self):\r\n self.food_orders.clear()\r\n self.drink_orders.clear()\r\n self.number_of_people = 0\r\n self.is_reserved = False\r\n\r\n def free_table_info(self):\r\n if not self.is_reserved:\r\n output = f\"Table: {self.table_number}\\n\"\r\n output += f\"Type: {self.__class__.__name__}\\n\"\r\n output += f\"Capacity: {self.capacity}\\n\"\r\n\r\n return output.strip()\r\n return None","repo_name":"MihailPo91/SoftUni","sub_path":"OOP_Exam_Prep/06/exam_skeleton/project/table/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39263402785","text":"import streamlit as st\nst.title(\"EDA PAGE\")\nfrom home import data \nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nst.set_option('deprecation.showPyplotGlobalUse', False)\nplt.style.use(\"fivethirtyeight\")\n\n#show data\nif st.sidebar.checkbox('show data',False):\n st.write(data)\n\n\n\ndef main():\n st.sidebar.subheader(\"Choose the plot\")\n\n\n \n\n #univariate histograms\n def histograms(data):\n st.write(\"Histograms\")\n data.hist()\n plt.tight_layout()\n st.pyplot()\n\n def histplot_boxplot(data, feature, bins=None, figsize=(12,7)):\n fig, (ax_box, ax_hist)=plt.subplots(\n nrows=2,\n sharex=True,\n gridspec_kw={\"height_ratios\":(0.25, 0.75)},\n figsize=figsize)\n \n sns.boxplot(data=data, x=feature, showmeans=True, ax=ax_box, color=\"violet\")\n sns.histplot(data=data, x=feature, bins=bins,ax=ax_hist, pallete=\"winter\") if bins else sns.histplot(data=data,\n x=feature, ax=ax_hist)\n ax_hist.axvline(data[feature].mean(), color='green', linestyle=\"--\")\n ax_hist.axvline(data[feature].median(), color='black', linestyle=\"-\")\n \n st.pyplot()\n \n def countplot(data, feature):\n plt.figure(figsize=(12,7))\n ax=sns.countplot(data=data, x=feature, color=\"green\")\n for p in ax.patches:\n x=p.get_bbox().get_points()[:,0]\n y=p.get_bbox().get_points()[1,1]\n ax.annotate(\"{:.3g}%\".format(100.*y/len(data)), (x.mean(), y), ha=\"center\", va=\"bottom\")\n st.pyplot()\n\n plot=st.sidebar.selectbox(\"Choose Univariates Plot\", ('histograms', 'boxplot-histplot','countplot'))\n if plot==\"histograms\":\n if st.sidebar.button(\"PLOT\"):\n histograms(data)\n\n if plot==\"boxplot-histplot\":\n if st.sidebar.button(\"PLOT\"):\n for col in data.select_dtypes(exclude=\"O\").columns:\n st.write(col)\n histplot_boxplot(data=data, feature=col)\n if plot==\"countplot\":\n if st.sidebar.button(\"PLOT\"):\n countplot(data, feature=\"Outcome\")\n\n\n\n Bivariates_plots=st.sidebar.selectbox('Choose Bivartes Graph',('Bivarties Bar Graph','Scatter Plots','Heatmap')) \n \n # Bivariate -categorical vs numerical\n def bivariate_barplot(data, feature1, feature2):\n data.groupby(feature1)[feature2].mean().plot(kind=\"bar\", color=\"orange\")\n plt.ylabel(feature2)\n st.pyplot()\n \n \n \n if Bivariates_plots=='Bivarties Bar Graph':\n if st.button('BAR PLOT',key='bivar'):\n \n \n\n for col in data.select_dtypes(exclude=\"O\").columns:\n print(\"Outcome vs \", col)\n bivariate_barplot(data=data, feature1=\"Outcome\",feature2= col)\n print(\"**************************************\")\n \n\n\n # numericall vs numerical\n def lineplot_scatterplot(data, feature1, feature2):\n plt.figure(figsize=(16,7))\n plt.subplot(1,2,1)\n sns.lineplot(data=data, x=feature1, y=feature2, color=\"green\")\n plt.title(\"Lineplotbetween {0} and {1}\".format(feature1, feature2))\n \n plt.subplot(1,2,2)\n sns.scatterplot(data=data, x=feature1, y=feature2, color=\"orange\", hue=\"Outcome\")\n plt.title(\"Scatter Plot Between {0} and {1}\".format(feature1, feature2))\n st.pyplot()\n\n\n if Bivariates_plots=='Scatter Plots':\n if st.button('SCATTER PLOT', key='scatter'):\n\n for col in data.select_dtypes(exclude=\"O\").columns:\n lineplot_scatterplot(data=data, feature1=\"Age\", feature2=col)\n\n\n\n\n if Bivariates_plots=='Heatmap':\n if st.button('HEATMAP PLOT', key='heatmap'):\n plt.figure(figsize=(12,7))\n sns.heatmap(data.corr(), annot=True, cmap=\"Spectral\", vmin=-1, vmax=+1)\n\n\n\n\n #multivarites\n\n\n\n \n\n\n\n\n \n \n\nif 
__name__==\"__main__\":\n main()\n\n ","repo_name":"VivianArriah/demo_nish","sub_path":"eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15343591850","text":"import numpy as np\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nimport qiskit\r\nfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister\r\n\r\nstart_time = time.time()\r\n\r\n# def CZ(qbit1,qbit2):\r\n# # qbit1 = [c_0,c_1]\r\n# # qbit2 = [d_0,d_1]\r\n# return [qbit1[0]*qbit2[0],qbit1[0]*qbit2[1],-qbit1[1]*qbit2[0],-qbit1[1]*qbit2[1]]\r\n#\r\n# def CNOT(qbit1,qbit2):\r\n# # qbit1 = [c_0,c_1]\r\n# # qbit2 = [d_0,d_1]\r\n# return [qbit1[0]*qbit2[0],qbit1[0]*qbit2[1],qbit1[1]*qbit2[1],qbit1[1]*qbit2[0]]\r\n\r\ndef normalize(state):\r\n # takes np.array representing state of system of qubits\r\n # outputs state normalized using Euclidean norm\r\n\r\n sum_of_squares = 0\r\n for i in range(len(state)):\r\n sum_of_squares += np.abs(state[i]) ** 2\r\n return [j/np.sqrt(sum_of_squares) for j in state]\r\n\r\n\r\ndef total_CNOT(init_state, i1,i2, normalized):\r\n # init state is a list of 2**n entries, each corresponding to the coefficient of that basis state\r\n # n = total number of qubits considered\r\n n = int(np.log2(len(init_state)))\r\n # i1 = index of control qubit, i2 = index of target qubit\r\n new_state = init_state.copy()\r\n for i in range(len(init_state)):\r\n basis_state = \"{0:b}\".format(i).zfill(n)\r\n basis_state_entry = init_state[i]\r\n if basis_state[i1] == '1':\r\n basis_state_list = list(basis_state)\r\n basis_state_list[i2] = str(1 - int(basis_state[i2]))\r\n new_basis_state= ''.join([str(item) for item in basis_state_list])\r\n new_basis_state_index = int(new_basis_state,2)\r\n new_state[new_basis_state_index] += basis_state_entry\r\n new_state[i] = new_state[i] - basis_state_entry\r\n\r\n if normalized == False:\r\n return new_state\r\n if normalized == True:\r\n return normalize(new_state)\r\n\r\n\r\n\r\ndef total_CZ(init_state, i1,i2):\r\n # init state is a list of 2**n entries, each corresponding to the coefficient of that basis state\r\n # NOTE: after applying a CZ gate, the state remains normalized\r\n\r\n # n = total number of qubits considered\r\n n = int(np.log2(len(init_state)))\r\n # i1 = index of control qubit, i2 = index of target qubit\r\n new_state = init_state.copy()\r\n for i in range(len(init_state)):\r\n basis_state = \"{0:b}\".format(i).zfill(n)\r\n basis_state_entry = init_state[i]\r\n\r\n if basis_state[i1] == '0' and basis_state[i2] == '0':\r\n new_state[i] = - basis_state_entry\r\n\r\n return new_state\r\n\r\ndef add_two_qbits(state1, state2):\r\n # takes 2 states, each an arbitrary number of qubits n & m, written in their respective bases\r\n # 1st state has 2**n basis vectors, 2nd state has 2**m basis vectors\r\n # e.g. 
for 1 qubit: qbit = [c_0,c_1]\r\n # outputs the overall state, written in the total basis, which has 2**(n+m) basis vectors\r\n\r\n n = int(np.log2(len(state1))) # number of qubits of 1st state\r\n m = int(np.log2(len(state2))) # number of qubits of 2nd state\r\n total_state = np.zeros(2**(n+m)) # 2**(n+m) = len(state1)+len(state1)\r\n for i in range(len(state1)):\r\n basis_state_1 = \"{0:b}\".format(i).zfill(n)\r\n basis_state_1_entry = state1[i]\r\n for j in range((len(state2))):\r\n basis_state_2 = \"{0:b}\".format(j).zfill(m)\r\n basis_state_2_entry = state2[j]\r\n\r\n basis_state_total = basis_state_1 + basis_state_2\r\n basis_state_total_index = int(basis_state_total,2)\r\n total_state[basis_state_total_index] = basis_state_1_entry*basis_state_2_entry\r\n return normalize(total_state)\r\n\r\n\r\n# add_qbits(*argv)\r\n# A wrapper around `add_two_qbits`.\r\n# Takes an arbitrary number of arguments, and performs\r\n# addition on all of them.\r\n#\r\n# Should not be called with fewer than 2 arguments.\r\n#\r\n# Should not be called with an extremely large number\r\n# of arguments, as the recursion depth limit might be\r\n# exceeded.\r\n\r\ndef add_qbits(*argv):\r\n assert len(argv) >= 2, (\"add_qbits cannot be called \"\r\n \"with fewer than 2 arguments\")\r\n\r\n if len(argv) == 2:\r\n return add_two_qbits(*argv)\r\n\r\n return add_qbits(add_two_qbits(argv[0], argv[1]), *argv[2:])\r\n\r\n\r\n\r\ndef un_entangle_1_qubit(state,chosen_qubit):\r\n n = int(np.log2(len(state))) # number of qubits of state\r\n new_state_0 = np.zeros(2**(n-1))\r\n new_state_1 = np.zeros(2**(n-1))\r\n\r\n for i in range(len(state)):\r\n # print('i=', i)\r\n\r\n basis_state = \"{0:b}\".format(i).zfill(n)\r\n # print('basis_state=', basis_state)\r\n\r\n basis_state_entry = state[i]\r\n # print('basis_state_entry=',basis_state_entry)\r\n\r\n basis_state_list = list(basis_state)\r\n\r\n del basis_state_list[chosen_qubit]\r\n new_basis_state = ''.join([str(item) for item in basis_state_list])\r\n new_basis_state_index = int(new_basis_state, 2)\r\n # print('basis_state[chosen_qubit]=',basis_state[chosen_qubit])\r\n\r\n\r\n if basis_state[chosen_qubit] == '0':\r\n new_state_0[new_basis_state_index] = basis_state_entry\r\n else:\r\n new_state_1[new_basis_state_index] = basis_state_entry\r\n # print('new_state_0=',new_state_0)\r\n # print('new_state_1=', new_state_1)\r\n\r\n\r\n all_0_for_0 = np.all((new_state_0 == 0))\r\n all_0_for_1 = np.all((new_state_1 == 0))\r\n # print(all_0_for_0)\r\n # print(all_0_for_1)\r\n\r\n if all_0_for_0 == False and all_0_for_1 == False:\r\n ratio = 0\r\n for j in range(2**(n-1)):\r\n if new_state_0[j] == 0 and new_state_1[j] != 0:\r\n return 'Cannot un-entangle this qubit from the overall state.'\r\n elif new_state_1[j] == 0 and new_state_0[j] != 0:\r\n return 'Cannot un-entangle this qubit from the overall state.'\r\n elif new_state_1[j] != 0 and new_state_0[j] != 0:\r\n ratio += new_state_0[j]/new_state_1[j]\r\n break\r\n # print('ratio=',ratio)\r\n\r\n for j in range(2**(n-1)):\r\n if new_state_0[j] == 0 and new_state_1[j] != 0:\r\n return 'Cannot un-entangle this qubit from the overall state.'\r\n elif new_state_1[j] == 0 and new_state_0[j] != 0:\r\n return 'Cannot un-entangle this qubit from the overall state.'\r\n elif new_state_1[j] != 0 and new_state_0[j] != 0:\r\n if ratio != new_state_0[j]/new_state_1[j]:\r\n return 'Cannot un-entangle this qubit from the overall state.'\r\n\r\n return (normalize(new_state_0), normalize([1,1/ratio]))\r\n\r\n\r\n elif all_0_for_0 == True:\r\n 
return (normalize(new_state_1), [0, 1])\r\n\r\n elif all_0_for_1 == True:\r\n return (normalize(new_state_0), [1, 0])\r\n\r\n\r\n\r\n\r\nc_0 = np.sqrt(1/2)\r\nc_1 = np.sqrt(1/2)\r\nd_0 = np.sqrt(1/3)\r\n# d_1 = np.sqrt(2/3)\r\nd_1 = 0\r\nq_1 = [c_0,c_1]\r\nq_2 = [d_0,d_1]\r\na1a2 = [np.sqrt(1/2),0,0,np.sqrt(1/2)] # Bell state\r\n\r\nq1a1a2q2 = add_qbits(q_1,a1a2,q_2)\r\n# print(un_entangle(q1a1a2q2,2))\r\nprint(q1a1a2q2)\r\n\r\ninter_state = total_CNOT(q1a1a2q2,0,1,True)\r\ninter_state = total_CZ(inter_state,1,2)\r\ninter_state = total_CNOT(inter_state,0,1,True)\r\ninter_state = total_CNOT(inter_state,3,2,True)\r\n\r\nprint(inter_state)\r\nprint(un_entangle_1_qubit(inter_state,0))\r\n\r\n# Function meant to untangle ANY NUMBER of qubits from the total state\r\n# NOT YET FINISHED\r\n\r\n# def un_entangle(state,chosen_qubits):\r\n# # chosen_qubits = list of qubits to unentangle\r\n# n = int(np.log2(len(state))) # number of total qubits of state\r\n# m = len(chosen_qubits) # number of qubits to un_entangle\r\n# output_state = np.zeros(2**(n-m))\r\n# output_state_unentangled = np.zeros(2 ** m)\r\n# list_of_lists = []\r\n#\r\n# for i in range(2**(m)):\r\n# state_of_one_qubit = np.zeros(2**(n-m))\r\n# list_of_lists.append(state_of_one_qubit)\r\n#\r\n# for i in range(len(state)):\r\n# # print('i=', i)\r\n#\r\n# basis_state = \"{0:b}\".format(i).zfill(n)\r\n# # print('basis_state=', basis_state)\r\n#\r\n# basis_state_entry = state[i]\r\n# # print('basis_state_entry=',basis_state_entry)\r\n#\r\n# basis_state_list = list(basis_state)\r\n#\r\n# list_of_untangled_basis_states = [] # list of strings\r\n# for k in range(len(chosen_qubits)):\r\n# list_of_untangled_basis_states.append(basis_state_list[chosen_qubits[k]])\r\n# del basis_state_list[chosen_qubits[k]]\r\n#\r\n# new_basis_state = ''.join([str(item) for item in basis_state_list])\r\n# new_basis_state_index = int(new_basis_state, 2)\r\n#\r\n# real_basis_vector_state = ''.join([str(item) for item in list_of_untangled_basis_states])\r\n# number_of_basis_vector = int(real_basis_vector_state, 2)\r\n#\r\n# list_of_lists[number_of_basis_vector][new_basis_state_index] = basis_state_entry\r\n#\r\n# list_of_unexistent_states = []\r\n# for j in range(2 ** m):\r\n# if np.all((list_of_lists[j] == 0)) == True:\r\n# output_state_unentangled[j] = 0\r\n# binary_unexistent_basis_state = \"{0:b}\".format(j).zfill(m)\r\n#\r\n# if np.all((list_of_all_0_boolvalues == False)) == True:\r\n# ratio = 0\r\n# for j in range(2**(n-m)):\r\n# if new_state_0[j] == 0 and new_state_1[j] != 0:\r\n# return 'Cannot un-entangle this qubit from the overall state.'\r\n# elif new_state_1[j] == 0 and new_state_0[j] != 0:\r\n# return 'Cannot un-entangle this qubit from the overall state.'\r\n# elif new_state_1[j] != 0 and new_state_0[j] != 0:\r\n# ratio += new_state_0[j]/new_state_1[j]\r\n# break\r\n# # print('ratio=',ratio)\r\n#\r\n# for j in range(2**(n-1)):\r\n# if new_state_0[j] == 0 and new_state_1[j] != 0:\r\n# return 'Cannot un-entangle this qubit from the overall state.'\r\n# elif new_state_1[j] == 0 and new_state_0[j] != 0:\r\n# return 'Cannot un-entangle this qubit from the overall state.'\r\n# elif new_state_1[j] != 0 and new_state_0[j] != 0:\r\n# if ratio != new_state_0[j]/new_state_1[j]:\r\n# return 'Cannot un-entangle this qubit from the overall state.'\r\n#\r\n# return (normalize(new_state_0), normalize([1,1/ratio]))\r\n#\r\n#\r\n# else:\r\n\r\n\r\n\r\n\r\ndef draw_circuit():\r\n anc = QuantumRegister(2, 'a')\r\n qr = QuantumRegister(2, 'q')\r\n # cr = ClassicalRegister(1, 
'c')\r\n cr = QuantumRegister(1, 'c')\r\n circuit = QuantumCircuit(anc,qr, cr)\r\n\r\n circuit.cx(qr[0],anc[0])\r\n circuit.cx(qr[1],anc[1])\r\n circuit.cz(qr[0],anc[0],label = 'CZ')\r\n\r\n circuit.cx(qr[0],cr[0])\r\n circuit.cx(qr[1],cr[0])\r\n circuit.fredkin(cr[0],anc[0],anc[1])\r\n circuit.cz(qr[0],anc[0],label = 'CZ')\r\n\r\n circuit.barrier()\r\n\r\n circuit.cx(qr[0],anc[0])\r\n circuit.cx(qr[1],anc[1])\r\n\r\n circuit.draw(output='mpl')\r\n plt.show()\r\n\r\nend_time = time.time()\r\nprint('time=', end_time-start_time)","repo_name":"sabinadragoi/Grover","sub_path":"global gate.py","file_name":"global gate.py","file_ext":"py","file_size_in_byte":10955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9891674657","text":"from concurrent.futures import ThreadPoolExecutor\n\nimport pytest\nfrom botocore.exceptions import ClientError, ParamValidationError\n\n\ndef test_describe_task_definition(ecs):\n with pytest.raises(ClientError):\n # The task definition doesn't exist\n ecs.describe_task_definition(taskDefinition=\"dagster\")\n\n dagster1 = ecs.register_task_definition(\n family=\"dagster\",\n containerDefinitions=[{\"image\": \"hello_world:latest\"}],\n networkMode=\"bridge\",\n memory=\"512\",\n cpu=\"256\",\n )\n dagster2 = ecs.register_task_definition(\n family=\"dagster\",\n containerDefinitions=[{\"image\": \"hello_world:latest\"}, {\"image\": \"busybox\"}],\n memory=\"512\",\n cpu=\"256\",\n )\n\n # It gets the latest revision\n assert ecs.describe_task_definition(taskDefinition=\"dagster\") == dagster2\n # It gets the specific revision\n assert ecs.describe_task_definition(taskDefinition=\"dagster:1\") == dagster1\n assert ecs.describe_task_definition(taskDefinition=\"dagster:2\") == dagster2\n\n # It also works with ARNs\n dagster1_arn = dagster1[\"taskDefinition\"][\"taskDefinitionArn\"]\n dagster2_arn = dagster2[\"taskDefinition\"][\"taskDefinitionArn\"]\n assert ecs.describe_task_definition(taskDefinition=dagster1_arn) == dagster1\n assert ecs.describe_task_definition(taskDefinition=dagster2_arn) == dagster2\n\n with pytest.raises(ClientError):\n # The revision doesn't exist\n ecs.describe_task_definition(taskDefinition=\"dagster:3\")\n\n\ndef test_describe_tasks(ecs):\n assert not ecs.describe_tasks(tasks=[\"invalid\"])[\"tasks\"]\n assert not ecs.describe_tasks(cluster=\"dagster\", tasks=[\"invalid\"])[\"tasks\"]\n\n ecs.register_task_definition(\n family=\"bridge\", containerDefinitions=[], networkMode=\"bridge\", memory=\"512\", cpu=\"256\"\n )\n\n default = ecs.run_task(taskDefinition=\"bridge\")\n default_arn = default[\"tasks\"][0][\"taskArn\"]\n default_id = default_arn.split(\"/\")[-1]\n\n dagster = ecs.run_task(taskDefinition=\"bridge\", cluster=\"dagster\")\n dagster_arn = dagster[\"tasks\"][0][\"taskArn\"]\n dagster_id = dagster_arn.split(\"/\")[-1]\n\n # It uses the default cluster\n assert ecs.describe_tasks(tasks=[default_arn]) == default\n # It works with task ARNs\n assert not ecs.describe_tasks(tasks=[dagster_arn])[\"tasks\"]\n assert ecs.describe_tasks(tasks=[dagster_arn], cluster=\"dagster\") == dagster\n\n # And task IDs\n assert ecs.describe_tasks(tasks=[default_id]) == default\n assert not ecs.describe_tasks(tasks=[dagster_id])[\"tasks\"]\n assert ecs.describe_tasks(tasks=[dagster_id], cluster=\"dagster\") == dagster\n\n\ndef test_list_account_settings(ecs):\n assert not ecs.list_account_settings()[\"settings\"]\n assert not ecs.list_account_settings(effectiveSettings=False)[\"settings\"]\n\n settings = ecs.list_account_settings(effectiveSettings=True)[\"settings\"]\n assert settings\n\n task_arn_format_setting = next(\n setting for setting in settings if setting[\"name\"] == \"taskLongArnFormat\"\n )\n assert task_arn_format_setting[\"value\"] == \"enabled\"\n\n\ndef test_list_tags_for_resource(ecs):\n invalid_arn = ecs._task_arn(\"invalid\") # noqa: SLF001\n with pytest.raises(ClientError):\n # The task doesn't exist\n ecs.list_tags_for_resource(resourceArn=invalid_arn)\n\n tags = [{\"key\": \"foo\", \"value\": \"bar\"}, {\"key\": \"fizz\", \"value\": \"buzz\"}]\n ecs.register_task_definition(\n family=\"dagster\", containerDefinitions=[], networkMode=\"bridge\", memory=\"512\", cpu=\"256\"\n )\n arn = 
ecs.run_task(taskDefinition=\"dagster\")[\"tasks\"][0][\"taskArn\"]\n\n assert not ecs.list_tags_for_resource(resourceArn=arn)[\"tags\"]\n\n ecs.tag_resource(resourceArn=arn, tags=tags)\n\n assert ecs.list_tags_for_resource(resourceArn=arn)[\"tags\"] == tags\n\n # With the new ARN format disabled\n ecs.put_account_setting(name=\"taskLongArnFormat\", value=\"disabled\")\n\n with pytest.raises(ClientError):\n ecs.list_tags_for_resource(resourceArn=arn)\n\n\ndef test_list_task_definitions(ecs):\n assert not ecs.list_task_definitions()[\"taskDefinitionArns\"]\n\n def arn(task_definition):\n return task_definition[\"taskDefinition\"][\"taskDefinitionArn\"]\n\n dagster1 = arn(\n ecs.register_task_definition(\n family=\"dagster\", containerDefinitions=[], memory=\"512\", cpu=\"256\"\n )\n )\n dagster2 = arn(\n ecs.register_task_definition(\n family=\"dagster\", containerDefinitions=[], memory=\"512\", cpu=\"256\"\n )\n )\n other1 = arn(\n ecs.register_task_definition(\n family=\"other\", containerDefinitions=[], memory=\"512\", cpu=\"256\"\n )\n )\n\n assert len(ecs.list_task_definitions()[\"taskDefinitionArns\"]) == 3\n assert dagster1 in ecs.list_task_definitions()[\"taskDefinitionArns\"]\n assert dagster2 in ecs.list_task_definitions()[\"taskDefinitionArns\"]\n assert other1 in ecs.list_task_definitions()[\"taskDefinitionArns\"]\n\n\ndef test_list_tasks(ecs):\n assert not ecs.list_tasks()[\"taskArns\"]\n\n ecs.register_task_definition(\n family=\"dagster\", containerDefinitions=[], networkMode=\"bridge\", memory=\"512\", cpu=\"256\"\n )\n ecs.register_task_definition(\n family=\"other\", containerDefinitions=[], networkMode=\"bridge\", memory=\"512\", cpu=\"256\"\n )\n\n def arn(response):\n return response[\"tasks\"][0][\"taskArn\"]\n\n default_cluster_dagster_family = arn(ecs.run_task(taskDefinition=\"dagster\"))\n other_cluster_dagster_family = arn(ecs.run_task(taskDefinition=\"dagster\", cluster=\"other\"))\n default_cluster_other_family = arn(ecs.run_task(taskDefinition=\"other\"))\n other_cluster_other_family = arn(ecs.run_task(taskDefinition=\"other\", cluster=\"other\"))\n\n # List using different combinations of cluster and family filters\n response = ecs.list_tasks()\n assert len(response[\"taskArns\"]) == 2\n assert default_cluster_dagster_family in response[\"taskArns\"]\n assert default_cluster_other_family in response[\"taskArns\"]\n\n response = ecs.list_tasks(family=\"dagster\")\n assert len(response[\"taskArns\"]) == 1\n assert default_cluster_dagster_family in response[\"taskArns\"]\n\n response = ecs.list_tasks(cluster=\"other\")\n assert len(response[\"taskArns\"]) == 2\n assert other_cluster_dagster_family in response[\"taskArns\"]\n assert other_cluster_other_family in response[\"taskArns\"]\n\n response = ecs.list_tasks(cluster=\"other\", family=\"dagster\")\n assert len(response[\"taskArns\"]) == 1\n assert other_cluster_dagster_family in response[\"taskArns\"]\n\n\ndef test_put_account_setting(ecs):\n setting = ecs.put_account_setting(name=\"taskLongArnFormat\", value=\"disabled\")[\"setting\"]\n assert setting[\"name\"] == \"taskLongArnFormat\"\n assert setting[\"value\"] == \"disabled\"\n\n # It overrides the default settings\n settings = ecs.list_account_settings(effectiveSettings=True)[\"settings\"]\n assert settings\n\n task_arn_format_setting = next(\n setting for setting in settings if setting[\"name\"] == \"taskLongArnFormat\"\n )\n assert task_arn_format_setting[\"value\"] == \"disabled\"\n\n\n@pytest.mark.flaky(reruns=1)\ndef 
test_register_task_definition(ecs):\n # Without memory\n with pytest.raises(ClientError):\n ecs.register_task_definition(family=\"dagster\", containerDefinitions=[])\n\n # Without cpu\n with pytest.raises(ClientError):\n ecs.register_task_definition(family=\"dagster\", containerDefinitions=[], memory=\"512\")\n\n # With an invalid memory/cpu combination\n # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-cpu-memory-error.html\n with pytest.raises(ClientError):\n ecs.register_task_definition(\n family=\"dagster\", containerDefinitions=[], memory=\"512\", cpu=\"1\"\n )\n\n # With invalid names\n with pytest.raises(ClientError):\n # Special characters\n ecs.register_task_definition(\n family=\"boom!\", containerDefinitions=[], memory=\"512\", cpu=\"256\"\n )\n\n with pytest.raises(ClientError):\n # Too long\n ecs.register_task_definition(\n family=256 * \"a\", containerDefinitions=[], memory=\"512\", cpu=\"256\"\n )\n\n response = ecs.register_task_definition(\n family=\"dagster\", containerDefinitions=[], memory=\"512\", cpu=\"256\"\n )\n assert response[\"taskDefinition\"][\"family\"] == \"dagster\"\n assert response[\"taskDefinition\"][\"revision\"] == 1\n assert response[\"taskDefinition\"][\"taskDefinitionArn\"].endswith(\"dagster:1\")\n\n response = ecs.register_task_definition(\n family=\"other\", containerDefinitions=[], memory=\"512\", cpu=\"256\"\n )\n assert response[\"taskDefinition\"][\"family\"] == \"other\"\n assert response[\"taskDefinition\"][\"revision\"] == 1\n assert response[\"taskDefinition\"][\"taskDefinitionArn\"].endswith(\"other:1\")\n\n response = ecs.register_task_definition(\n family=\"dagster\", containerDefinitions=[], memory=\"512\", cpu=\"256\"\n )\n assert response[\"taskDefinition\"][\"family\"] == \"dagster\"\n assert response[\"taskDefinition\"][\"revision\"] == 2\n assert response[\"taskDefinition\"][\"taskDefinitionArn\"].endswith(\"dagster:2\")\n\n response = ecs.register_task_definition(\n family=\"dagster\",\n containerDefinitions=[{\"image\": \"hello_world:latest\"}],\n memory=\"512\",\n cpu=\"256\",\n )\n assert response[\"taskDefinition\"][\"containerDefinitions\"][0][\"image\"] == \"hello_world:latest\"\n\n response = ecs.register_task_definition(\n family=\"dagster\", containerDefinitions=[], networkMode=\"bridge\", memory=\"512\", cpu=\"256\"\n )\n assert response[\"taskDefinition\"][\"networkMode\"] == \"bridge\"\n\n # Secrets default to an empty list\n response = ecs.register_task_definition(\n family=\"secrets\",\n containerDefinitions=[{\"image\": \"hello_world:latest\"}],\n memory=\"512\",\n cpu=\"256\",\n )\n assert response[\"taskDefinition\"][\"containerDefinitions\"][0][\"secrets\"] == []\n\n # Denies concurrent requests per family\n with pytest.raises(ClientError):\n # The task definition doesn't exist\n ecs.describe_task_definition(taskDefinition=\"concurrent\")\n\n with ThreadPoolExecutor(max_workers=2) as executor:\n\n def task():\n ecs.register_task_definition(\n family=\"concurrent\",\n containerDefinitions=[{\"image\": \"hello_world:latest\"}],\n memory=\"512\",\n cpu=\"256\",\n )\n\n # Infrequently, our concurrent futures don't fire fast enough to hit\n # our stubbed ECS's lock. 
We can force this flaky behavior by firing\n # thousands of futures; by the time the later futures launch, the\n # earlier futures have already completed and released their lock.\n #\n # We've marked this test as flaky and retry it once on failure to\n # try to mitigate this.\n futures = [executor.submit(task) for i in range(2)]\n\n # We successfully registered only 1 task definition revision\n assert (\n ecs.describe_task_definition(taskDefinition=\"concurrent\")[\"taskDefinition\"][\"revision\"] == 1\n )\n # And the other call errored\n assert any(\n \"Too many concurrent attempts to create a new revision of the specified family\"\n in str(future.exception())\n for future in futures\n )\n\n\ndef test_run_task(ecs, ec2, subnet):\n with pytest.raises(ParamValidationError):\n # The task doesn't exist\n ecs.run_task()\n\n with pytest.raises(ClientError):\n # The task definition doesn't exist\n ecs.run_task(taskDefinition=\"dagster\")\n\n ecs.register_task_definition(\n family=\"awsvpc\", containerDefinitions=[], networkMode=\"awsvpc\", memory=\"512\", cpu=\"256\"\n )\n ecs.register_task_definition(\n family=\"bridge\", containerDefinitions=[], networkMode=\"bridge\", memory=\"512\", cpu=\"256\"\n )\n\n response = ecs.run_task(taskDefinition=\"bridge\")\n assert len(response[\"tasks\"]) == 1\n assert \"bridge\" in response[\"tasks\"][0][\"taskDefinitionArn\"]\n assert response[\"tasks\"][0][\"lastStatus\"] == \"RUNNING\"\n\n # It uses the default cluster\n assert response[\"tasks\"][0][\"clusterArn\"] == ecs._cluster_arn(\"default\") # noqa: SLF001\n response = ecs.run_task(taskDefinition=\"bridge\", cluster=\"dagster\")\n assert response[\"tasks\"][0][\"clusterArn\"] == ecs._cluster_arn(\"dagster\") # noqa: SLF001\n response = ecs.run_task(\n taskDefinition=\"bridge\",\n cluster=ecs._cluster_arn(\"dagster\"), # noqa: SLF001\n )\n assert response[\"tasks\"][0][\"clusterArn\"] == ecs._cluster_arn(\"dagster\") # noqa: SLF001\n\n # It includes memory and cpu\n assert response[\"tasks\"][0][\"cpu\"] == \"256\"\n assert response[\"tasks\"][0][\"memory\"] == \"512\"\n\n response = ecs.run_task(taskDefinition=\"bridge\", count=2)\n assert len(response[\"tasks\"]) == 2\n assert all([\"bridge\" in task[\"taskDefinitionArn\"] for task in response[\"tasks\"]])\n\n with pytest.raises(ClientError):\n # It must have a networkConfiguration if networkMode is \"awsvpc\"\n ecs.run_task(taskDefinition=\"awsvpc\")\n\n # The subnet doesn't exist\n with pytest.raises(ClientError):\n ecs.run_task(\n taskDefinition=\"awsvpc\",\n networkConfiguration={\"awsvpcConfiguration\": {\"subnets\": [\"subnet-12345\"]}},\n )\n\n # With a real subnet\n response = ecs.run_task(\n taskDefinition=\"awsvpc\",\n networkConfiguration={\"awsvpcConfiguration\": {\"subnets\": [subnet.id]}},\n )\n\n assert len(response[\"tasks\"]) == 1\n assert \"awsvpc\" in response[\"tasks\"][0][\"taskDefinitionArn\"]\n attachment = response[\"tasks\"][0][\"attachments\"][0]\n assert attachment[\"type\"] == \"ElasticNetworkInterface\"\n details = dict(detail.values() for detail in attachment[\"details\"])\n assert details[\"subnetId\"] == subnet.id\n eni = ec2.NetworkInterface(details[\"networkInterfaceId\"])\n assert not eni.association_attribute\n\n # When assigning a public IP\n response = ecs.run_task(\n taskDefinition=\"awsvpc\",\n networkConfiguration={\n \"awsvpcConfiguration\": {\"subnets\": [subnet.id], \"assignPublicIp\": \"ENABLED\"}\n },\n )\n details = dict(detail.values() for detail in 
response[\"tasks\"][0][\"attachments\"][0][\"details\"])\n assert details[\"subnetId\"] == subnet.id\n eni = ec2.NetworkInterface(details[\"networkInterfaceId\"])\n assert eni.association_attribute.get(\"PublicIp\")\n\n # containers and overrides are included\n ecs.register_task_definition(\n family=\"container\",\n containerDefinitions=[\n {\n \"name\": \"hello_world\",\n \"image\": \"hello_world:latest\",\n \"environment\": [{\"name\": \"FOO\", \"value\": \"bar\"}],\n }\n ],\n networkMode=\"bridge\",\n memory=\"512\",\n cpu=\"256\",\n )\n response = ecs.run_task(taskDefinition=\"container\")\n assert response[\"tasks\"][0][\"containers\"]\n # ECS does not expose the task definition's environment when\n # describing tasks\n assert \"FOO\" not in response\n\n response = ecs.run_task(\n taskDefinition=\"container\",\n overrides={\"containerOverrides\": [{\"name\": \"hello_world\", \"command\": [\"ls\"]}]},\n )\n assert response[\"tasks\"][0][\"overrides\"][\"containerOverrides\"][0][\"command\"] == [\"ls\"]\n\n # With invalid memory and cpu overrides\n with pytest.raises(ClientError):\n ecs.run_task(\n taskDefinition=\"container\",\n overrides={\"cpu\": \"7\"},\n )\n\n # With valid memory and cpu overrides\n response = ecs.run_task(taskDefinition=\"container\", overrides={\"cpu\": \"512\", \"memory\": \"1024\"})\n assert response[\"tasks\"][0][\"overrides\"][\"cpu\"] == \"512\"\n assert response[\"tasks\"][0][\"overrides\"][\"memory\"] == \"1024\"\n\n # With very long overrides\n with pytest.raises(Exception):\n ecs.run_task(\n taskDefinition=\"container\",\n # overrides is limited to 8192 characters including json formatting\n # https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html\n overrides={\"containerOverrides\": [\"boom\" for i in range(10000)]},\n )\n\n\ndef test_stop_task(ecs):\n with pytest.raises(ClientError):\n # The task doesn't exist\n ecs.stop_task(task=ecs._task_arn(\"invalid\")) # noqa: SLF001\n\n ecs.register_task_definition(\n family=\"bridge\", containerDefinitions=[], networkMode=\"bridge\", memory=\"512\", cpu=\"256\"\n )\n task_arn = ecs.run_task(taskDefinition=\"bridge\")[\"tasks\"][0][\"taskArn\"]\n\n assert ecs.describe_tasks(tasks=[task_arn])[\"tasks\"][0][\"lastStatus\"] == \"RUNNING\"\n\n response = ecs.stop_task(task=task_arn)\n assert response[\"task\"][\"taskArn\"] == task_arn\n assert response[\"task\"][\"lastStatus\"] == \"STOPPED\"\n\n assert ecs.describe_tasks(tasks=[task_arn])[\"tasks\"][0][\"lastStatus\"] == \"STOPPED\"\n\n\ndef test_tag_resource(ecs):\n tags = [{\"key\": \"foo\", \"value\": \"bar\"}]\n\n invalid_arn = ecs._task_arn(\"invalid\") # noqa: SLF001\n with pytest.raises(ClientError):\n # The task doesn't exist\n ecs.tag_resource(resourceArn=invalid_arn, tags=tags)\n\n ecs.register_task_definition(\n family=\"dagster\", containerDefinitions=[], networkMode=\"bridge\", memory=\"512\", cpu=\"256\"\n )\n arn = ecs.run_task(taskDefinition=\"dagster\")[\"tasks\"][0][\"taskArn\"]\n\n ecs.tag_resource(resourceArn=arn, tags=tags)\n\n # With the new ARN format disabled\n ecs.put_account_setting(name=\"taskLongArnFormat\", value=\"disabled\")\n\n with pytest.raises(ClientError):\n ecs.tag_resource(resourceArn=arn, tags=tags)\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-aws/dagster_aws_tests/ecs_tests/test_stubbed_ecs.py","file_name":"test_stubbed_ecs.py","file_ext":"py","file_size_in_byte":17501,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"}
+{"seq_id":"72710163688","text":"import os\r\nimport cmath\r\nimport numpy as np\r\nimport matplotlib.cm as cm\r\nimport matplotlib.pyplot as plt\r\nimport animatplot as amp\r\n\r\nif __name__ == '__main__':\r\n # 必要に応じて変更するパラメータ\r\n # --- ここから ---\r\n \r\n # 入射角(単位は度, 0 < theta_i < 90)\r\n theta_i_deg = 60\r\n\r\n # 偏波:'TE' = TE波, 'TM' = TM波\r\n polarization = 'TE'\r\n\r\n # 媒質1の屈折率\r\n n1 = 1.41\r\n\r\n # 媒質2の屈折率\r\n n2 = 1\r\n\r\n # --- ここまで ---\r\n\r\n \r\n # 定数と関数\r\n # 複素数に対応するためにcmath内の関数を使用している\r\n # (「2種媒質の平面境界における反射と屈折」で使用し\r\n # プログラムを流用したためcmathを使用している)\r\n PI = cmath.pi\r\n EXP = cmath.exp\r\n COS = cmath.cos\r\n SIN = cmath.sin\r\n ACOS = cmath.acos\r\n ASIN = cmath.asin\r\n SQRT = cmath.sqrt\r\n\r\n \r\n # 入射角をラジアンに変換\r\n theta_i = theta_i_deg * PI / 180\r\n\r\n # 反射角\r\n theta_r = theta_i\r\n\r\n # 反射係数\r\n if polarization == 'TE':\r\n R = -1\r\n else:\r\n R = 1\r\n\r\n\r\n # 正弦波振動1周期に対する分割数\r\n step_per_period = 20\r\n\r\n\r\n # 計算領域の設定\r\n xx = np.arange(0, 2, 0.01)\r\n zz = np.arange(-1, 1, 0.01)\r\n Z, X = np.meshgrid(zz, xx)\r\n\r\n # 総合電界/磁���,入射界,反射界を記録する配列の確保\r\n F = np.zeros(np.shape(X))\r\n Fi = np.zeros(np.shape(X))\r\n Fr = np.zeros(np.shape(X))\r\n\r\n\r\n fig, ax = plt.subplots(1, 3)\r\n\r\n\r\n # 電界/磁界の分布を更新する関数 \r\n def update(n):\r\n # 真空中の波の速度\r\n c = 1\r\n\r\n # 真空中の波長\r\n wavelength = 1\r\n\r\n # 角周波数\r\n w = 2 * PI * c / wavelength\r\n\r\n # 周期\r\n period = 2 * PI / w\r\n\r\n # 描画するための時間の刻み幅\r\n dt = period / step_per_period\r\n\r\n # 時刻\r\n t = n * dt\r\n\r\n # 入射波に対する波数ベクトル\r\n ki = 2 * np.pi / (wavelength / n1)\r\n kiz = ki * SIN(theta_i)\r\n kix = -ki * COS(theta_i)\r\n\r\n # 反射波に対する波数ベクトル\r\n kr = ki\r\n krz = kr * SIN(theta_r)\r\n krx = kr * COS(theta_r)\r\n\r\n\r\n # 電界/磁界分布の計算\r\n M, N = np.shape(X)\r\n for i in range(0, M):\r\n for j in range(0, N):\r\n z = Z[i][j]\r\n x = X[i][j]\r\n\r\n # 式(3.1) for TE, (3.23) for TM\r\n Fi[i][j] = EXP(1j * (w * t - (kiz*z + kix*x))).real\r\n\r\n # 式(3.18) fro TE, (3.24) for TM\r\n Fr[i][j] = (R * EXP(1j * (w * t - (krz*z + krx*x)))).real\r\n \r\n F = Fi + Fr\r\n\r\n images = [Fi, F, Fr]\r\n titles = ['incident field', 'total field', \r\n 'reflected field']\r\n for i in range(0, len(images)):\r\n ax[i].imshow(images[i], vmin=-2, vmax=2, cmap='jet')\r\n ax[i].invert_yaxis()\r\n ax[i].set_title(titles[i])\r\n ax[i].set_xlabel('z')\r\n ax[i].set_xticks([])\r\n ax[i].set_ylabel('x')\r\n ax[i].set_yticks([])\r\n\r\n\r\n # アニメーション表示\r\n timeline = amp.Timeline(range(0,step_per_period))\r\n block = amp.blocks.Nuke(update, length=len(timeline))\r\n anim = amp.Animation([block], timeline)\r\n## anim.save_gif(os.path.basename(__file__).split('.')[0])\r\n plt.show()\r\n","repo_name":"tksysi/kit_electromagnetic_waves","sub_path":"09/fig1.py","file_name":"fig1.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13858719879","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.urls import reverse_lazy, path\nfrom .views import *\n\nadmin.autodiscover()\n\napp_name = 'Admin'\n\nurlpatterns = [\n url(r'^deleteuser/$', DeleteUser, name='delete_user'),\n url(r'^delete-user/$', DeleteUserindex, name='Delete_User_index'),\n url(r'^users/$', Allusers, name='allusers'),\n path('view/',ViewUser, name='View_User'),\n url(r'^add-user/$', AddUser.as_view(), name='Add_User'),\n url(r'^edit-user/$', EditUser.as_view(), name='Edit_User'),\n url(r'^change-password/$', PasswordChange.as_view(), name='Password_Change'),\n url(r'^admin/change-status-active$', ChangeStatusActive, name='change_status_active'),\n url(r'^admin/change-status-inactive$', ChangeStatusInactive, name='change_status_inactive'),\n url(r'^admin/change-status-delete$', ChangeStatusDelete, name='change_status_delete'),\n url(r'^admin/download-CSV$', Download_CSV, name='download_CSV'),\n url(r'^admin/Upload-CSV$', UploadCSV, name='Upload_CSV'),\n ]\n","repo_name":"MukhtarAhmadZargar/BaseDajngo","sub_path":"Admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"375729827","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\ndef copy_description_into_content_source(apps, schema_editor):\n CategoryModel = apps.get_model('blog', 'Category')\n for category in CategoryModel.objects.all():\n category.content_source = category.description\n category.save()\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0013_auto_20151217_1938'),\n ]\n\n operations = [\n migrations.RunPython(copy_description_into_content_source,\n reverse_code=migrations.RunPython.noop),\n ]\n","repo_name":"FarmCodeGary/InspirationForge","sub_path":"blog/migrations/0014_auto_20151217_1939.py","file_name":"0014_auto_20151217_1939.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"3878755318","text":"\nimport random\nfrom string import ascii_uppercase\nfrom string import digits\n\n\n\n\nclass Maladie:\n \"\"\" on a initialiser une lise des maladies \"\"\"\n listeNomMaladie = {\n 1:\"Condylomes\",\n 2:\"KysteOvarien\",\n 3:\"FibromeUterien\",\n 4:\"KysteVaginal\",\n 5:\"Endometriose\",\n 6:\"Salpingite\",\n 7:\"Vulvovaginite\",\n 8:\"Vaginite\",\n 9:\"cancer\"\n }\n \"\"\" constaté par le patient \"\"\"\n nom:str\n symptomes:list\n id:str\n carateristiques:str\n \"\"\" on cherche a initialiser un maladie commune ou un cancer (qui a des parametre aditionel)\"\"\"\n def __init__(self,*arg):\n \"\"\" *arg comptient l'essemble des argument passé \"\"\"\n self.generateID()\n if(len(arg)==3):\n self.nom = self.listeNomMaladie[arg[0]] \n self.symptomes = arg[1]\n self.carateristiques = arg[2]\n if(len(arg)==6):\n if(arg[0]==9):\n self.__init__(arg[0],arg[1],arg[2])\n return print(\"ce n'est pas un cancer \")\n self.generateID()\n self.nom = self.listeNomMaladie[arg[0]]\n self.symptomes = arg[1]\n self.carateristiques = arg[2]\n self.causes = arg[3]\n self.risques = arg[4]\n self.stades = arg[5]\n\n \n\n def generateID(self,size= 10,chars= ascii_uppercase + digits):\n chaine = ''.join(random.choice(chars) for _ in range(size))\n chaine = random.sample(chaine,len(chaine))\n chaine = ''.join([str(item) for item in chaine ])\n self.id = chaine\n\n \n\n\n \n\n\n def rtrnObj(self):\n if(self.nom and self.nom!=\"cancer\"):\n return {\n \"id\":self.id,\n \"nom\":self.nom,\n \"symptomes\":self.symptomes,\n \"caracteristique\":self.carateristiques\n } \n else:\n return {\n \"id\":self.id,\n \"nom\":self.nom,\n \"symptomes\":self.symptomes,\n \"caracteristique\":self.carateristiques,\n \"causes\": self.causes,\n \"risques\": self.risques,\n \"stades\": self.stades\n } ","repo_name":"iaiglsib/gynecologie","sub_path":"classe/Maladie.py","file_name":"Maladie.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30774413697","text":"'''\nCreated on 29 Apr 2015\n\n@author: KDMJUN001\n'''\n#Importing created classes\nimport InternalGeometry as IG\nfrom ExternalOrientationDict import ExternalOrientation\nfrom InternalOrientationDict import InternalOrientation\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\n\nimport pylab\n\n\n#Python Libraries\nimport numpy as np\nfrom random import randint, random, uniform\nimport sympy as sy\n\nsym_x,sym_y,sym_x0,sym_y0,sym_X0,sym_Y0,sym_Z0,sym_omega,sym_phi,sym_kappa,sym_c = sy.symbols('x,y,x0,y0,X0,Y0,Z0,omega,phi,kappa,c')\n\nclass Execute(object):\n '''\n classdocs\n '''\n def __init__(self):\n '''\n Constructor\n '''\n \n def Setup(self):\n \n self.imageGrid()\n \n \n def imageGrid(self):\n #Creation of n random points\n image1 = []\n global n\n n = 30\n for i in range(0,n):\n image1 += [[uniform(-0.3,0.3),uniform(-0.3,0.3)]]\n \n #Creation of scale for each point\n for i in range(0,n):\n image1[i].append((randint(90,100)))\n \n #Converting list to numpy array to allow for easy indexing\n global image1Array\n\n image1Array = (np.asarray(image1))\n \n# print(\"Randomly Generated Image Points with Scale\")\n# for i in range(0,n):\n# print (image1[i])\n# print()\n \n self.cameraInterior()\n \n def cameraInterior(self):\n global cameraInternal\n cameraInternal = InternalOrientation()\n \n #All values sent are in mm \n #name, c,xi,yi\n IP = ['Canon',0.3,0,0]\n cameraInternal.keys(IP)\n \n self.cameraOrientation()\n \n def cameraOrientation(self):\n #Setting camera parameters for image 1\n global cameraExternal\n cameraExternal = ExternalOrientation()\n\n #Coordinates for Kings Battery = 50750.110, 58299.800\n #U0,V0,W0,phi,omega,kappa,scale,camera\n cameraOne = [0, 0, 10, np.radians(0),np.radians(0),np.radians(0),(1/100),'cameraOne']\n cameraExternal.keys(cameraOne)\n\n cameraTwo = [10, 0, 10,np.radians(0.5),np.radians(0.5),np.radians(0.05),(1/100),'cameraTwo']\n cameraExternal.keys(cameraTwo)\n \n self.ObjectPoints()\n \n \n def ObjectPoints(self):\n omega,phi,kappa= sy.symbols('omega,phi,kappa')\n \n omega = cameraExternal['cameraOne'].omega\n phi = cameraExternal['cameraOne'].phi\n kappa = cameraExternal['cameraOne'].kappa\n \n Rphi = np.matrix([[sy.cos(phi),0,-sy.sin(phi)],\n [0, 1, 0],\n [sy.sin(phi),0,sy.cos(phi)]])\n \n Romega = np.matrix([[1, 0, 0],\n [0,sy.cos(omega), sy.sin(omega)],\n [0,-sy.sin(omega),sy.cos(omega)]])\n \n Rkappa = np.matrix([[sy.cos(kappa),sy.sin(kappa), 0],\n [-sy.sin(kappa),sy.cos(kappa),0],\n [0, 0, 1]])\n global R\n# R = Romega*Rkappa*Rphi\n R = Rphi*Rkappa*Romega\n projectionCentre = [[cameraExternal['cameraOne'].U0],[cameraExternal['cameraOne'].V0],[cameraExternal['cameraOne'].W0]]\n \n global objectPoints1\n objectPoints1 = []\n \n for i in range(0,n):\n \n internal = [[image1Array[i,0]], #x\n [image1Array[i,1]], #y\n [-cameraInternal['Canon'].c]] #focal length \n \n point = image1Array[i,2]*R*internal+projectionCentre\n\n objectPoints1 += [point]\n \n objectPoints1 = np.asarray(objectPoints1)\n \n# for i in range (0,n):\n# print(objectPoints1[i][0],objectPoints1[i][1],objectPoints1[i][2],\"\\n\")\n global objectPointsError\n objectPointsError = []\n \n #Adds small error to points\n for i in range(0,n):\n \n error = [objectPoints1[i,0]+(random()/10),objectPoints1[i,1]+(random()/10),objectPoints1[i,2]+(random()/10)]\n objectPointsError += [error]\n \n objectPointsError = np.asarray(objectPointsError)\n \n self.ImagePoints2()\n \n def ImagePoints2(self):\n \n 
image2 = []\n \n for i in range (0,n):\n\n x = (-cameraInternal['Canon'].c)*((R[0,0]*(objectPoints1[i,0]-cameraExternal['cameraTwo'].U0)\n +R[0,1]*(objectPoints1[i,1]-cameraExternal['cameraTwo'].V0)\n +R[0,2]*(objectPoints1[i,2]-cameraExternal['cameraTwo'].W0))/(R[2,0]*(objectPoints1[i,0]-cameraExternal['cameraTwo'].U0)+\n R[2,1]*(objectPoints1[i,1]-cameraExternal['cameraTwo'].V0)+\n R[2,2]*(objectPoints1[i,2]-cameraExternal['cameraTwo'].W0)))\n \n y = (-cameraInternal['Canon'].c)*((R[1,0]*(objectPoints1[i,0]-cameraExternal['cameraTwo'].U0)\n +R[1,1]*(objectPoints1[i,1]-cameraExternal['cameraTwo'].V0)\n +R[1,2]*(objectPoints1[i,2]-cameraExternal['cameraTwo'].W0))/(R[2,0]*(objectPoints1[i,0]-cameraExternal['cameraTwo'].U0)+\n R[2,1]*(objectPoints1[i,1]-cameraExternal['cameraTwo'].V0)+\n R[2,2]*(objectPoints1[i,2]-cameraExternal['cameraTwo'].W0)))\n \n image2 += [[x,y]]\n \n global image2Array\n image2Array = np.asarray(image2)\n \n global image2Error\n image2Error = []\n \n #Adds small error to points\n for i in range(0,n):\n \n error = [image2Array[i,0],image2Array[i,1]]\n image2Error += [error]\n \n image2Error = np.asarray(image2Error) \n \n self.lies()\n self.output()\n \n def ObjectPoints2(self):\n projectionCentre = [[cameraExternal['cameraTwo'].U0],[cameraExternal['cameraTwo'].V0],[cameraExternal['cameraTwo'].W0]]\n \n global objectPoints2\n objectPoints2 = []\n \n for i in range(0,n):\n \n internal = [[image2Array[i,0]], #x\n [image2Array[i,1]], #y\n [-cameraInternal['Canon'].c]] #focal length \n \n point = image2Array[i,2]*R*internal+projectionCentre\n\n objectPoints2 += [point]\n \n objectPoints2 = np.asarray(objectPoints2)\n \n self.output()\n \n def output(self):\n fig = plt.figure(figsize=(80,60))\n ax = fig.add_subplot(1,1,1,projection ='3d')\n plt.title('Projection Centre (0,0,10), Principal Distance = 0.3m, Scale = 1:100')\n \n ax.scatter(0, 0, 10, c='blue')\n ax.scatter(10, 0, 10, c='red')\n\n \n for i in range(0,n):\n# #Image 1 points\n# ax.scatter(image1Array[i][0]+10,image1Array[i][1]+10, c='blue')\n# #Image 2 points\n# ax.scatter(image2Error[i][0]+10,image2Error[i][1], c='red')\n #Object points 1\n ax.scatter(objectPoints1[i][0],objectPoints1[i][1],objectPoints1[i][2], c='blue')\n #Object points 2\n ax.scatter(objectPointsError[i][0],objectPointsError[i][1],objectPointsError[i][2], c='red') \n #Projection Rays - Image 1\n ax.plot([0,objectPoints1[i][0]],[0,objectPoints1[i][1]],[10,objectPoints1[i][2]], c='blue')\n# #Projection rays - Image 2\n ax.plot([10,objectPointsError[i][0]],[0,objectPointsError[i][1]],[10,objectPointsError[i][2]], c='red')\n \n\n# plt.show()\n self.imagepointsplot()\n\n def imagepointsplot(self):\n# Code for 3D plot of image points\n# fig2 = plt.figure(figsize=(80,60))\n# bx = fig2.add_subplot(1,1,1,projection='3d')\n \n# for i in range(0,n):\n# bx.scatter(image1Array[i][0],image1Array[i][1], c='blue', s = 50)\n# bx.scatter(image2Error[i][0],image2Error[i][1], c='red', s = 50)\n\n for i in range(0,n):\n plt.scatter(image1Array[i][0],image1Array[i][1], c='green')\n plt.scatter(image2Error[i][0],image2Error[i][1], c='red')\n \n# plt.show()\n \n def lies(self):\n sum1 = []\n for i in range(0,n):\n one = objectPoints1[i][0]-objectPointsError[i][0]\n two = objectPoints1[i][1]-objectPointsError[i][1]\n three = objectPoints1[i][2]-objectPointsError[i][2]\n \n print(image1Array[i][0])\n print(image1Array[i][1])\n print(image2Error[i][0])\n print(image2Error[i][1],\"\\n\")\n print (objectPoints1[i][0])\n print (objectPoints1[i][1])\n print 
(objectPoints1[i][2], \"\\n\\n\")\n sum1 += [one+two+three]\n \n mean_error = (sum(sum1)/(len(sum1)))\n sum2 = np.asarray(sum1)\n sum_sss = []\n \n for i in range(0,len(sum1)):\n sum_sss += [(sum2[i]-mean_error)]\n \n print(mean_error)\n print(1/(len(sum1))*(sum(sum_sss))**2)\n print()\n \n count = 0\n \n def errors(count,mean_error):\n x = random()\n if x < abs(mean_error):\n print (x)\n count += 1\n if count < 6:\n errors(count,mean_error)\n elif count == 6:\n pass\n else:\n errors(count,mean_error)\n errors(count,mean_error)\n","repo_name":"JayrenKadamen/UCT","sub_path":"APG4011F/Assignment 3/Camera_Resection/Execution.py","file_name":"Execution.py","file_ext":"py","file_size_in_byte":9671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32088906369","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 29 13:22:21 2019\n\n@author: Simon\n\"\"\"\n\n# https://towardsdatascience.com/machine-learning-simple-linear-regression-with-python-f04ecfdadc13\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\n\n# Importing the dataset\ndataset = pd.read_csv(\n 'salary_data.csv') # or try with salary_data_not_linear.csv\nX = dataset.iloc[:, :-1].values \ny = dataset.iloc[:, 1].values\n\n# Splitting the dataset into the Training set and Test set\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=1/3, random_state=0)\n\n# Fitting Simple Linear Regression to the Training set\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Visualizing the Training set results\nviz_train = plt\nviz_train.scatter(X_train, y_train, color='red')\nviz_train.plot(X_train, regressor.predict(X_train), color='blue')\nviz_train.title('Salary VS Experience (Training set)')\nviz_train.xlabel('Year of Experience')\nviz_train.ylabel('Salary')\nviz_train.show()\n\n# Visualizing the Test set results\nviz_test = plt\nviz_test.scatter(X_test, y_test, color='red')\nviz_test.plot(X_train, regressor.predict(X_train), color='blue')\nviz_test.title('Salary VS Experience (Test set)')\nviz_test.xlabel('Year of Experience')\nviz_test.ylabel('Salary')\nviz_test.show()\n\n# Predicting the result of 5 Years Experience\ny_pred_5 = regressor.predict([[5]])\nprint('Prediction of 5: %.2f' % y_pred_5)\n\n# Predicting the Test set results\ny_preds_test = regressor.predict(X_test)\n\n# Visualizing the predicted results of the test set, and 5 years experience\nviz_predicted_test = plt\nviz_predicted_test.scatter(X_test, y_test, color='red')\nviz_predicted_test.scatter(X_test, y_preds_test, color='blue')\nviz_predicted_test.scatter([[5]], y_pred_5, color='green', zorder=10, s=75)\nviz_predicted_test.plot(X_train, regressor.predict(X_train), color='blue')\nviz_predicted_test.show()\n\n# Predicting the training set results\ny_preds_train = regressor.predict(X_train)\n\n# Visualizing the predicted results of the training set, and 5 years experience\nviz_predicted_train = plt\nviz_predicted_train.scatter(X_train, y_train, color='red')\nviz_predicted_train.scatter(X_train, y_preds_train, color='blue')\nviz_predicted_train.scatter([[5]], y_pred_5, color='green', zorder=10, s=75)\nviz_predicted_train.plot(X_train, regressor.predict(X_train), color='blue')\nviz_predicted_train.show()\n","repo_name":"simontrigowhite/TryPython","sub_path":"linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70339537769","text":"import gym\nfrom gym import spaces\nimport numpy as np\nfrom environments.machines.machine import Machine\nfrom simulators.eventsimulator2 import EventSimulator2\nfrom configparser import ConfigParser\n\n\nclass JSPEnv2(gym.Env):\n \"\"\"\n Description:\n\n A system contains 1 (consider 5 later) machines and produces 2 products (Pa and Pb) with 2 operation flows. The release\n interarrival times of Pa and Pb are exponentially distributed with mean 5 and 8 time units. The travel\n times and processing times also follow exponential distributions. The objective is to minimize the cycle\n time.\n\n Personal Thought: When there is only a machine, job release time should = job arrival time\n\n Source:\n https://www.informs-sim.org/wsc17papers/includes/files/329.pdf\n\n Observation:\n Type:\n Num\tObservation Min Max\n 0 release interarrival time of products exp distributed between 5 and 8\n 1 pt 30 60\n\n Type: Box(3)\n Num\tObservation Min Max\n 0 (m1)num of jobs being travelling to (x_m) 0 5\n 1 (m1)num of jobs being waiting in front of (y_m) 0 5\n 2 (m1)num of jobs being processed on (z_m) 0 1\n 4 raw processing time\n # * 5 (consider only a machine first)\n\n 0\tCart Position -4.8 4.8\n 1\tCart Velocity -Inf Inf\n 2\tPole Angle -24 deg 24 deg\n 3\tPole Velocity At Tip -Inf Inf\n\n Actions:\n Type: Discrete(2)\n Num\tAction\n 0 select one job to process\n 1 no job is selected and the machine keeps idle\n\n Reward:\n r(s'|s, a) = (1-k)*r_a + k*\\deta*r_s, 0 job_to_process.due_t:\n self.machine.process_job(job_to_process, t)\n # del self.waiting_jobs[0]\n assigned_job.pt -= t - assigned_job.start_processing_t\n self.waiting_jobs[0] = assigned_job\n else:\n self.machine.process_job(job_to_process, t)\n del self.waiting_jobs[0]\n new_state = len(self.waiting_jobs)\n #print(\"Debug in JobEnv, new_state is \", new_state)\n # forward one step, and calculate tardiness\n # May/18/2020, only calculate the tardiness of finished job at t+1\n updated_machine, tardi = event_simu.check_machine_idle_and_update(t+1, self.machine, granularity)\n # tardi = self.machine.assigned_job.pt + self.machine.assigned_job.start_processing_t - t\n '''\n # calculate current total tardiness as reward\n reward = 0\n for j in self.waiting_jobs:\n tardiness = (t+j.pt)-j.due_t\n if tardiness < 0:\n tardiness = 0\n reward += tardiness\n print(\"At time \", t, \" pt \", j.pt, \" due_t \", j.due_t, \" tard \", tardiness)\n processed_pt = t - self.machine.assigned_job.start_processing_t\n remained_pt = self.machine.assigned_job.pt - processed_pt\n machine_job_tard = (t+remained_pt) - self.machine.assigned_job.due_t\n if machine_job_tard < 0:\n machine_job_tard = 0\n reward += machine_job_tard\n reward = -1*reward # cz this is tardiness\n print(\"Getting reward \", reward, \" from action \", action)\n '''\n\n done = bool(new_state == 0)\n # Dec/10/2020: use Just in time, so get the absolute value instead of lateness.\n return new_state, abs(tardi), done, updated_machine #{}\n\n def takeDueTime(self, job):\n # print(\"job type \", self.criteria)\n if self.criteria == 1:\n return job.due_t\n if self.criteria == 2:\n return job.due_t + job.pt\n\n\n def debug_waiting_jobs(self):\n for j in self.waiting_jobs:\n print(j.to_string())\n\n def reset(self, simulator):\n # initialize machines and its job being processed\n # reset time t = 0, seed = 0\n simulator.reset()\n machine = Machine()\n job = simulator.arrive_new_job(1)#self.create_jobs(1)\n machine.process_job(job[0], 0)\n self.machine = machine\n # 
always start from 3 waiting jobs\n self.waiting_jobs = simulator.arrive_new_job(3)\n # print(\"Debug created machine with size \", len(machines))\n self.action_space = spaces.Discrete(len(self.waiting_jobs))\n self.state = len(self.waiting_jobs)+1 # plus dummy one #np.array([travel_jobs, wait_jobs, machines, i]) # np.array([num_wait, num_process, num_travel])\n self.TT = 0\n return self.state\n\n '''\n def create_jobs(self, num):\n jobs = []\n for i in range(num):\n pt = random.randint(self.min_pt, self.max_pt)\n job = Job(0, pt, pt)\n jobs.append(job)\n return jobs\n '''\n\n\nif __name__ == '__main__':\n env = JSPEnv2()\n i = 0\n for i_episode in range(1):\n # iterate every time step to check if there is\n # 1) a job arrives at an idle machine;\n # 2) a machine with a non-empty queue becomes idle\n _conf = ConfigParser()\n _conf.read('app.ini')\n event_simu = EventSimulator2(_conf)\n observation = env.reset(event_simu)\n granularity = 1 # for calculating the remaining pt\n for t in range(100):\n print(\"Observation[0] is \", observation[0])\n # Check decision epoch according to events\n # job release/job arrival (simulation strategy to be used?)\n # /machine idle\n # env.state[2] is machine list\n events = event_simu.event_simulation(t, env.machines, granularity)\n # update pt\n released_new_jobs = events[1]\n for new_job in released_new_jobs:\n env.raw_pt += new_job.pt\n machines = events[2]\n env.state[2] = machines\n env.remain_raw_pt -= events[3]\n # jobs in the related queue at a decision epoch make up an action set (\n # plus a dummy action 0)\n # if no released and waited job, then dummy action\n if len(env.state[0]) == 0 and len(env.state[1]) == 0:\n action = 0\n else:\n env.action_space = spaces.Discrete(len(env.state[0])) # change to state[1] later\n action = env.action_space.sample() # randomly select\n observation, reward, done, info = env.step(action, events, t)\n if done:\n print(\"Episode finished after {} timesteps\".format(t + 1))\n break\n i = env.state[3] # update id, useful for the reset()\n print(\"Getting reward \", reward, \" state id \", env.state[3])\n print(\"Finished!\")\n","repo_name":"Yuanyuan517/RL_OnlineScheduling","sub_path":"realtime_jsp/environments/JSPEnv2.py","file_name":"JSPEnv2.py","file_ext":"py","file_size_in_byte":11971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"13321249917","text":"from .plot_2d_separator import plot_2d_classification, plot_2d_separator\nfrom .plot_helpers import cm2, cm3\nfrom .plot_agglomerative import plot_agglomerative, plot_agglomerative_algorithm\nfrom .plot_improper_preprocessing import plot_improper_processing, plot_proper_processing\nfrom .plot_cross_validation import (plot_threefold_split, plot_label_kfold,\n plot_shuffle_split, plot_cross_validation,\n plot_stratified_cross_validation)\n\nfrom .plot_grid_search import plot_grid_search_overview, plot_cross_val_selection\nfrom .plot_metrics import (plot_confusion_matrix_illustration,\n plot_binary_confusion_matrix,\n plot_decision_threshold)\n\n__all__ = ['plot_2d_classification',\n 'plot_2d_separator',\n 'cm3', 'cm2', 'plot_improper_processing', 'plot_proper_processing',\n 'plot_label_kfold',\n 'plot_shuffle_split',\n 'plot_stratified_cross_validation',\n 'plot_threefold_split',\n 'plot_cross_validation',\n 'plot_grid_search_overview',\n 'plot_cross_val_selection',\n 'plot_confusion_matrix_illustration',\n 'plot_binary_confusion_matrix',\n 'plot_decision_threshold'\n ]\n","repo_name":"Karl-WangSK/ML_sklearn","sub_path":"1.Scikit-Learn_Machine_Learning/tools/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27944487924","text":"#Task 22 - 24/6/2023\r\n#Vevan O Narain S7-C\r\n\r\n\"\"\"WAPP to to print duplicates from a list of integers\"\"\"\r\n\r\nl = []\r\n\r\nfor i in range(10):\r\n num = input(\"Enter Number: \")\r\n l.append(num)\r\n \r\nuniqueList = []\r\nduplicateList = []\r\n \r\nfor i in l:\r\n if i not in uniqueList:\r\n uniqueList.append(i)\r\n elif i not in duplicateList:\r\n duplicateList.append(i)\r\n \r\nprint(duplicateList)","repo_name":"vevanonarain/Class-12-practical-report-file","sub_path":"Practical Report File/Task22.py","file_name":"Task22.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35995649859","text":"from eviction_tracker import detainer_warrants\nfrom datetime import datetime\nimport time\nfrom flask import current_app\nimport shutil\nimport os\nfrom eviction_tracker.detainer_warrants.models import DetainerWarrant\nfrom eviction_tracker.admin.models import User, user_datastore\nfrom .email import export_notification\nfrom flask_mail import Attachment\nfrom .time_util import millis_timestamp, file_friendly_timestamp\n\n\nclass Task:\n def __init__(self, id, requester):\n self.id = id\n self.requester = requester\n self.started_at = datetime.now()\n\n def to_json(self):\n return {\"id\": self.id, \"started_at\": millis_timestamp(self.started_at)}\n\n\ndef export_zip(app, task, date_range):\n with app.app_context():\n export_dir = task.id\n export_path = f\"{current_app.config['DATA_DIR']}/davidson-co/eviction-data/export/{export_dir}\"\n if not os.path.exists(export_path):\n os.makedirs(export_path)\n\n current_user = user_datastore.find_user(email=task.requester[\"email\"])\n omit_defendant_info = not current_user.can_access_defendant_data()\n csv_filename = \"detainer-warrants.csv\"\n detainer_warrants.exports.warrants_to_csv(\n export_path + \"/\" + csv_filename,\n start_date=date_range[\"start\"],\n end_date=date_range[\"end\"],\n omit_defendant_info=omit_defendant_info,\n )\n\n judgment_csv_filename = \"judgments.csv\"\n detainer_warrants.exports.judgments_to_csv(\n export_path + \"/\" + judgment_csv_filename,\n start_date=date_range[\"start\"],\n end_date=date_range[\"end\"],\n omit_defendant_info=omit_defendant_info,\n )\n\n shutil.make_archive(export_path, \"zip\", export_path)\n\n attachments = []\n with open(export_path + \".zip\", \"rb\") as fp:\n attachments.append(\n Attachment(\n filename=f\"eviction-data-davidson-co-{file_friendly_timestamp(task.started_at)}.zip\",\n content_type=\"application/zip\",\n data=fp.read(),\n )\n )\n\n first_file_date = (\n DetainerWarrant.query.order_by(DetainerWarrant._file_date.asc())\n .first()\n .file_date\n )\n\n export_notification(\n task,\n attachments,\n start_date=date_range.get(\"start\", first_file_date),\n end_date=date_range.get(\"end\", task.started_at),\n )\n","repo_name":"red-door-collective/eviction-tracker","sub_path":"eviction_tracker/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
+{"seq_id":"36568975046","text":"\r\n## recorta o especto entre a e b\r\n \r\ndef cut(data,a,b):\r\n sel1 = (data['wn'] > a )\r\n sel2 = (data['wn'] < b )\r\n ver = (sel1.astype(int) + sel2.astype(int))-1\r\n sel = ver.astype(bool)\r\n data['r'] = data['r'][:,sel]\r\n data['wn'] = data['wn'][sel]\r\n return data\r\n\r\n## plota o histograma de area da imagem\r\n\r\ndef norm(data):\r\n import numpy as np\r\n spc = data['r']\r\n media = np.mean(spc,axis=1)\r\n std = np.std(spc,axis=1)\r\n data['r'] = np.divide((spc - media[:,None]),std[:,None])\r\n return data\r\n\r\n\r\ndef golay(data,diff,order,win):\r\n import numpy as np\r\n from scipy.signal import savgol_coeffs\r\n from scipy.sparse import spdiags\r\n import numpy.matlib\r\n n = int((win-1)/2)\r\n sgcoeff = savgol_coeffs(win, order, deriv=diff)[:,None]\r\n sgcoeff = np.matlib.repmat(sgcoeff,1,data['r'].shape[1])\r\n diags = np.arange(-n,n+1)\r\n D = spdiags(sgcoeff,diags,data['r'].shape[1],data['r'].shape[1]).toarray()\r\n D[:,0:n] = 0\r\n D[:,data['r'].shape[1]-5:data['r'].shape[1]] = 0\r\n data['r'] = np.dot(data['r'],D)\r\n return data\r\n\r\n \r\ndef norm2r(data,ini1,fim1,ini2,fim2):\r\n import numpy as np\r\n sel = np.logical_and(data['wn'] > int(ini1),data['wn'] < int(fim1))\r\n r1 = data['r'][:,sel]\r\n wn1 = data['wn'][sel][:,None]\r\n media = np.mean(r1,axis=1)\r\n std = np.std(r1,axis=1)\r\n r1 = np.divide((r1 - media[:,None]),std[:,None])\r\n \r\n sel = np.logical_and(data['wn'] > int(ini2),data['wn'] < int(fim2))\r\n r2 = data['r'][:,sel]\r\n wn2 = data['wn'][sel][:,None]\r\n media = np.mean(r2,axis=1)\r\n std = np.std(r2,axis=1)\r\n r2 = np.divide((r2 - media[:,None]),std[:,None])\r\n data['r'] = np.column_stack((r1,r2))\r\n data['wn'] = np.vstack((wn1,wn2))\r\n data['wn'] = data['wn'].reshape(-1)\r\n return data \r\n\r\ndef pcares(data,n):\r\n import numpy as np\r\n from sklearn.decomposition import PCA\r\n pca = PCA()\r\n media = np.mean(data['r'],axis=0)\r\n pca.fit(data['r']-media)\r\n scoress = pca.transform(data['r'])\r\n scoress[:,n-1:-1] = 0 \r\n coeff= pca.components_\r\n data['r'] =media + np.dot(scoress,coeff)\r\n return data\r\n\r\ndef offset(data,ini,fim):\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n sel = np.logical_and(data['wn'] > int(ini),data['wn'] < int(fim));\r\n r = data['r'][:,sel];\r\n minino = np.min(r,axis=1);\r\n minino = np.reshape(minino,(-1,1));\r\n minino = np.tile(minino,data['r'].shape[1]);\r\n data['r'] = data['r']-minino;\r\n return data\r\n\r\ndef binned(data):\r\n import numpy as np\r\n import matplotlib.pyplot as plt \r\n r = data['r']\r\n r = r.reshape(data['dx'],data['dy'],-1)\r\n dx = r.shape[0]\r\n dy = r.shape[1]\r\n dz = r.shape[2]\r\n dxbin = int(np.floor(dx/2))+2\r\n dybin = int(np.floor(dy/2))+2\r\n rbin = np.ones((dxbin,dybin,dz))\r\n jj = 0\r\n ii = 0\r\n for i in range(0,dy-2,2):\r\n for j in range(0,dx-2,2):\r\n sel = r[j:j+2,i:i+2,:];\r\n sel = np.mean(sel.reshape(4,dz),axis=0)\r\n rbin[jj,ii,:] = sel\r\n jj = jj + 1\r\n \r\n jj = 0\r\n ii = ii + 1\r\n data['r'] = rbin.reshape((dxbin*dybin,dz)) \r\n data['sel'] = np.ones((dxbin*dybin,)).astype('bool')\r\n data['dx'] = dxbin\r\n data['dy'] = dybin\r\n \r\n return data\r\n\r\n","repo_name":"tmartinip/hsp2","sub_path":"hsp_prep.py","file_name":"hsp_prep.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31557913821","text":"from robot import Robot\n\nrobot = Robot()\nprint(\"Input exit to exit\")\n\nwhile True:\n command = input(\">\")\n if command.upper() == \"REPORT\":\n print(robot.report())\n elif command.upper() == \"EXIT\":\n break\n robot.parser(command)\n","repo_name":"TristoKrempita/toy-robot-task","sub_path":"robot_caller.py","file_name":"robot_caller.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74069381288","text":"from bokeh.server.server import Server\nfrom bokeh.plotting import figure, ColumnDataSource\n\ndef make_document(doc):\n fig = figure(title='Line plot!', sizing_mode='scale_width')\n fig.line(x=[1, 2, 3], y=[1, 4, 9])\n\n doc.title = \"Hello, world!\"\n doc.add_root(fig)\n\nserver = Server({'/': make_document}, port=5008)\nserver.start()\n\nif __name__ == '__main__':\n print('Opening Bokeh application on http://localhost:5006/')\n\n server.io_loop.add_callback(server.show, \"/\")\n server.io_loop.start()","repo_name":"foobarbecue/ultrahelicorder","sub_path":"precursors/bokeh_standalone.py","file_name":"bokeh_standalone.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"2982345209","text":"from queue import Queue\nfrom turtle import back\n\n\ndef displayGraph(graph):\n for y,x in graph:\n print(str((y,x))+\" : \"+str(graph[(y,x)]))\n\n\ndef createAdjacencyListFromList(grid):\n graph = {}\n\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] != -1:\n node = (i,j)\n graph[node] = []\n\n #check adjacent neighbours : up, down, left, right\n for di, dj in [(-1,0), (1,0), (0,-1), (0,1), (-1,-1), (1,1), (-1,1), (1,-1)]:\n xi, xj = i+di, j+dj\n\n if (xi>=0 and xi=0 and xj\nmode_choice_data_dir = 'mode_choice/'\nsupplement_data_dir = 'nhts_raw/'\noutput_plot_dir = 'plot/'\n\n# gems_output_dir_2 = 'GEMS_output/LA_local/'\nparam_dir = 'parameter/'\n\nmode_choice_data_source = 'NHTS/NHTS_tract_mode.split.National.RData'\nmode_choice_geotype_id = 'NHTS/nhts_no_ids_1hrtimebins_with_imputation.csv'\nmode_choice_microtype_id = 'NHTS/NHTS_tract_od.tr.purpose.National_transgeo.RData'\nsupplement_data_source = 'trippub.csv'\nhousehold_data_source = 'hhpub.csv'\n\ntime_period_file = 'TimePeriods_TEMPO.csv'\n# CA_houshold_file = 'household_ca.RData'\nnational_geoid_lookup_file = 'ccst_geoid_key_tranps_geo_with_imputation.csv'\nstate_region_file = 'NHTS_region_state.csv'\ndistance_matrix = 'distance_matrix_by_tracts.csv'\n\nnSubBins = 1\nmps_to_mph = 2.23694\nmode_lookup = {1: 'walk', 2: 'bike', 3: 'auto', 4: 'auto', 5: 'auto', 6: 'auto', 7: 'scooter', 8: 'scooter', 9: 'auto', 10: 'bus', 11: 'bus',\n 12: 'bus', 13: 'bus', 14: 'bus', 15: 'rail', 16: 'rail', 17: 'taxi', 18: 'auto', 19: 'other',\n 20: 'other', 97: 'other'}\n\n# load NHTS dataset\nmode_choice_data = pyreadr.read_r(mode_choice_data_dir + mode_choice_data_source)\nmode_choice_data_df = mode_choice_data['mode.split']\nprint(mode_choice_data_df.columns)\nod_microtype_data = pyreadr.read_r(mode_choice_data_dir + mode_choice_microtype_id)\nod_microtype_data_df = od_microtype_data['od.tr.purpose']\nprint(od_microtype_data_df.columns)\n\nmode_choice_additional = pd.read_csv(supplement_data_dir + supplement_data_source, sep = ',')\nprint(mode_choice_additional.columns)\nhousehold_weights = pd.read_csv(supplement_data_dir + household_data_source, sep = ',')\nprint(household_weights.columns)\n\nmode_choice_geotype_df = pd.read_csv(mode_choice_data_dir + mode_choice_geotype_id, sep = ',')\nprint(mode_choice_geotype_df.columns)\n\n# \n\n# generate NHTS variables\nlist_of_used_gems_variables = ['HOUSEID', 'PERSONID', 'TDTRPNUM', 'HHFAMINC', \n 'WHYTRP1S', 'TRPMILES', 'STRTTIME','TRVLCMIN', 'TRIPPURP']\n\nlist_of_od_variables = ['HOUSEID', 'PERSONID', 'TDTRPNUM', 'ORIG_COUNTRY',\n 'o_geoid', 'DEST_COUNTRY', 'd_geoid', 'o_microtype', 'o_geotype',\n 'd_microtype', 'd_geotype']\n\nlist_of_hh_variables = ['HOUSEID', 'WTHHFIN', 'HBHUR']\n\nlist_of_geotype_variables = ['HOUSEID', 'h_geotype']\n\ndef gems_attributes_generator(NHTS_data, distance_bins, distance_bin_labels):\n NHTS_data.loc[:, 'populationGroupType'] = 'high-income'\n NHTS_data.loc[NHTS_data['hhfaminc']<= 5, 'populationGroupType'] = 'low-income'\n NHTS_data.loc[NHTS_data['hhfaminc'].isin([6,7,8]), 'populationGroupType'] = 'medium-income'\n NHTS_data.loc[:, 'trip_purpose'] = 'other'\n NHTS_data.loc[NHTS_data['whytrp1s'] == 1, 'trip_purpose'] = 'home'\n NHTS_data.loc[NHTS_data['whytrp1s'] == 10, 'trip_purpose'] = 'work'\n NHTS_data.loc[NHTS_data['whytrp1s'].isin([40, 50, 80]), 'trip_purpose'] = 'leisure'\n NHTS_data.loc[NHTS_data['whytrp1s'] == 20, 'trip_purpose'] = 'school'\n NHTS_data.loc[NHTS_data['whytrp1s'] == 30, 'trip_purpose'] = 
'medical'\n# NHTS_data.loc[:, 'tripPurpose'] = 'nonwork'\n# NHTS_data.loc[NHTS_data['whytrp1s'].isin(work_trips), 'tripPurpose'] = 'work' \n NHTS_data.loc[:, 'distanceBin'] = pd.cut(NHTS_data.loc[:, 'trpmiles'], distance_bins, \n labels = distance_bin_labels, ordered = False)\n return(NHTS_data)\n\ndef travel_time_calculator(NHTS_data):\n NHTS_data.loc[:, 'start hour'] = NHTS_data.loc[:, 'strttime'] / 100\n NHTS_data.loc[:, 'start hour'] = NHTS_data.loc[:, 'start hour'].astype(int)\n NHTS_data.loc[:, 'start hour'] = NHTS_data.loc[:, 'start hour'] + \\\n (NHTS_data.loc[:, 'strttime'] %100) / 60.0\n\n # NHTS_data.loc[:, 'end hour'] = NHTS_data.loc[:, 'endtime'] / 100\n # NHTS_data.loc[:, 'end hour'] = NHTS_data.loc[:, 'end hour'].astype(int)\n # NHTS_data.loc[:, 'end hour'] = NHTS_data.loc[:, 'end hour'] + \\\n # (NHTS_data.loc[:, 'endtime'] %100) / 60.0\n\n NHTS_data.loc[:, 'travel time'] = NHTS_data.loc[:, 'trvlcmin'] / 60.0\n # NHTS_data.loc[(NHTS_data['travel time'] < 0) & (NHTS_data['trpmiles'] < 1), 'travel time'] = 0\n # NHTS_data.loc[NHTS_data['travel time'] < 0, 'travel time'] += 24\n return(NHTS_data)\n \n\nmode_choice_additional = mode_choice_additional.loc[:, list_of_used_gems_variables]\nmode_choice_additional.columns= mode_choice_additional.columns.str.lower()\nmode_choice_data_df = pd.merge(mode_choice_data_df, mode_choice_additional, \n on = ['houseid', 'personid', 'tdtrpnum'], how = 'left')\n\nmode_choice_geotype_df = mode_choice_geotype_df.loc[:, list_of_geotype_variables]\nmode_choice_geotype_df.columns= mode_choice_geotype_df.columns.str.lower()\nmode_choice_geotype_df = mode_choice_geotype_df.drop_duplicates(keep = 'first')\nmode_choice_data_df = pd.merge(mode_choice_data_df, mode_choice_geotype_df, \n on = ['houseid'], how = 'left')\n\nod_variable_df = od_microtype_data_df.loc[:, list_of_od_variables]\nod_variable_df.columns= od_variable_df.columns.str.lower()\nmode_choice_data_df = pd.merge(mode_choice_data_df, od_variable_df, \n on = ['houseid', 'personid', 'tdtrpnum'], how = 'left')\n\nhh_variable_df = household_weights.loc[:, list_of_hh_variables]\nhh_variable_df.columns= hh_variable_df.columns.str.lower()\nmode_choice_data_df = pd.merge(mode_choice_data_df, hh_variable_df, \n on = ['houseid'], how = 'left')\n\ndistance_bins = [0, 1.3, 3, 5, 8, 10, 20, mode_choice_data_df['trpmiles'].max()]\ndistance_bin_labels = ['1_1', '1_2', '1_3', '2_1', '2_2', '3_0', '4_0']\n\nmode_choice_data_df = gems_attributes_generator(mode_choice_data_df, distance_bins, distance_bin_labels)\nmode_choice_data_df = travel_time_calculator(mode_choice_data_df)\nprint(mode_choice_data_df.trip_purpose.unique())\n\n# \n\n# change directory to bild aq\nos.chdir('/Users/xiaodanxu/Library/CloudStorage/GoogleDrive-arielinseu@gmail.com/My Drive/GEMS/BILD-AQ/data')\ninput_dir = 'Input/'\nnetwork_dir = 'Network/'\nplot_dir = 'Plot/'\n# generate population\nnational_geoid_lookup = pd.read_csv(national_geoid_lookup_file, sep = ',')\n\nlist_of_population_variables= ['houseid', 'home_geoid', \n 'populationGroupType',\n 'wthhfin']\npopulation_trips = mode_choice_data_df.loc[:, list_of_population_variables]\npopulation_trips = population_trips.drop_duplicates()\npopulation_trips = pd.merge(population_trips, national_geoid_lookup,\n left_on = 'home_geoid', right_on = 'GEOID',\n how = 'left')\npopulation_by_tract = population_trips.groupby(['st_code', 'geotype', \n 'microtype', 'populationGroupType'])[['wthhfin']].sum()\npopulation_by_tract.columns =['NHTS_households']\npopulation_by_tract = 
population_by_tract.reset_index()\n# population_by_tract.loc[:, 'home_geoid'] = population_by_tract.loc[:, 'home_geoid'].astype(int)\nprint(len(population_by_tract))\npopulation_by_tract.to_csv(input_dir + 'NHTS_population.csv', index = False)\n\n# \n\n# assign trip tag and O-D attributes\nmode_choice_data_df.loc[:, 'h_origin'] = 1 * (mode_choice_data_df.loc[:, 'o_geoid'] == mode_choice_data_df.loc[:, 'home_geoid']) + \\\n0 * (mode_choice_data_df.loc[:, 'o_geoid'] != mode_choice_data_df.loc[:, 'home_geoid'])\n\nmode_choice_data_df.loc[:, 'h_dest'] = 1 * (mode_choice_data_df.loc[:, 'd_geoid'] == mode_choice_data_df.loc[:, 'home_geoid']) + \\\n0 * (mode_choice_data_df.loc[:, 'd_geoid'] != mode_choice_data_df.loc[:, 'home_geoid'])\n\nmode_choice_data_df.loc[:, 'nhb'] = 1 * (mode_choice_data_df.loc[:, 'trippurp'] == 'NHB') + \\\n0 * (mode_choice_data_df.loc[:, 'trippurp'] != 'NHB')\n\n\nmode_choice_data_df.loc[:, 'trip_tag'] = 'nhb'\nmode_choice_data_df.loc[mode_choice_data_df['h_origin'] == 1, 'trip_tag'] = 'origin'\nmode_choice_data_df.loc[mode_choice_data_df['h_dest'] == 1, 'trip_tag'] = 'dest'\nprint(mode_choice_data_df.trip_tag.unique())\nmode_choice_data_df.loc[:, ['h_origin', 'h_dest', 'nhb', 'trip_tag']].head(10)\n\n# assign tour end micro-geotype ID\nmode_choice_data_df.loc[:, 'dest_geotype'] = \\\nmode_choice_data_df.loc[:, 'o_geotype']\nmode_choice_data_df.loc[:, 'dest_microtype'] = \\\nmode_choice_data_df.loc[:, 'o_microtype']\nmode_choice_data_df.loc[:, 'dest_geoid'] = \\\nmode_choice_data_df.loc[:, 'o_geoid']\n\nmode_choice_data_df.loc[:, 'orig_geotype'] = \\\nmode_choice_data_df.loc[:, 'd_geotype']\nmode_choice_data_df.loc[:, 'orig_microtype'] = \\\nmode_choice_data_df.loc[:, 'd_microtype']\nmode_choice_data_df.loc[:, 'orig_geoid'] = \\\nmode_choice_data_df.loc[:, 'd_geoid']\n\ncriteria1 = mode_choice_data_df['trip_tag'].isin(['origin', 'nhb'])\n\nmode_choice_data_df.loc[criteria1, 'dest_geotype'] = \\\nmode_choice_data_df.loc[criteria1, 'd_geotype']\nmode_choice_data_df.loc[criteria1, 'dest_microtype'] = \\\nmode_choice_data_df.loc[criteria1, 'd_microtype']\nmode_choice_data_df.loc[criteria1, 'dest_geoid'] = \\\nmode_choice_data_df.loc[criteria1, 'd_geoid']\n\nmode_choice_data_df.loc[criteria1, 'orig_geotype'] = \\\nmode_choice_data_df.loc[criteria1, 'd_geotype']\nmode_choice_data_df.loc[criteria1, 'orig_microtype'] = \\\nmode_choice_data_df.loc[criteria1, 'd_microtype']\nmode_choice_data_df.loc[criteria1, 'orig_geoid'] = \\\nmode_choice_data_df.loc[criteria1, 'o_geoid']\n\n# \n\n# aggregating trip by home micro-geotype, trip purpose, \n# time bin, distance bin and income group\n\nstate_region_lookup = read_csv(input_dir + state_region_file)\nmode_choice_data_df.loc[:, 'mode'] = mode_choice_data_df.loc[:, 'trptrans'].map(mode_lookup)\ncar_data_df = mode_choice_data_df.loc[mode_choice_data_df['mode'] == 'auto']\n# car_data_df = car_data_df.loc[car_data_df['st_code'].isin(['AK', 'HI', 'OR', 'WA'])]\n\n# plt.ylim([0, 0.03])\n# plt.show()\ncar_data_df = car_data_df[['home_geoid', 'o_geoid', 'd_geoid', \n 'o_geotype', 'o_microtype', \n 'dest_geotype','dest_microtype', \n 'populationGroupType', 'trip_tag', \n 'start hour', 'travel time', \n 'trip_purpose', 'trpmiles', 'distanceBin', 'wtperfin']]\ncar_data_df = pd.merge(car_data_df, national_geoid_lookup,\n left_on = 'home_geoid', right_on = 'GEOID',\n how = 'left')\ncar_data_df.to_csv(input_dir + 'NHTS_car_trips.csv', index = False)\n\n# \nunique_state = state_region_lookup['state'].unique()\nfor st in unique_state:\n region_code = 
str(state_region_lookup.loc[state_region_lookup['state'] == st, 'region'].tolist()[0])\n \n list_of_neighboring_states = \\\n state_region_lookup.loc[state_region_lookup['region'] == region_code, 'state'].unique()\n print(region_code, list_of_neighboring_states)\n car_data_df_sel = car_data_df.loc[car_data_df['st_code'].isin(list_of_neighboring_states)]\n home_based_trips = \\\n car_data_df_sel.loc[car_data_df_sel['trip_tag'].isin(['origin', 'dest'])]\n nonhome_based_trips = \\\n car_data_df_sel.loc[car_data_df_sel['trip_tag'] == 'nhb']\n print(home_based_trips.wtperfin.sum()) # about 75%\n print(nonhome_based_trips.wtperfin.sum()) # about 25%\n \n # assign ditance for non-home trips\n filelist = []\n for item in list_of_neighboring_states:\n filelist.append(network_dir + 'combined/distance_matrix_by_tracts_' + item + '.csv')\n print(filelist)\n distance_matrix = pd.concat([read_csv(f) for f in filelist ])\n\n list_of_home_tracts = nonhome_based_trips['home_geoid'].unique()\n distance_matrix = \\\n distance_matrix.loc[distance_matrix['origin'].isin(list_of_home_tracts)]\n\n distance_matrix.loc[:, ['origin', 'destination']] = \\\n distance_matrix.loc[:, ['origin', 'destination']].astype(int)\n\n \n nonhome_based_trips = pd.merge(nonhome_based_trips, distance_matrix,\n left_on = ['home_geoid', 'o_geoid'],\n right_on = ['origin', 'destination'], \n how = 'left')\n \n distance_bins = [0, 1.3, 3, 5, 8, 10, 20, nonhome_based_trips['distance'].max()]\n distance_bin_labels = ['1_1', '1_2', '1_3', '2_1', '2_2', '3_0', '4_0']\n \n nonhome_based_trips.loc[:, 'distanceBin_home'] = pd.cut(nonhome_based_trips.loc[:, 'distance'], distance_bins, \n labels = distance_bin_labels, ordered = False)\n \n # generating trip rates for home-based trips\n result_dir = input_dir + st\n if not os.path.exists(result_dir):\n os.mkdir(result_dir)\n visual_dir = input_dir + st\n if not os.path.exists(visual_dir):\n os.mkdir(visual_dir)\n grouping_var = ['geotype', 'microtype', 'populationGroupType',\n 'dest_geotype','dest_microtype', \n 'trip_tag', 'trip_purpose', 'distanceBin']\n \n home_based_trips_agg = home_based_trips.groupby(grouping_var)[['wtperfin']].sum()\n home_based_trips_agg.columns = ['NHTS_trips']\n home_based_trips_agg = home_based_trips_agg.reset_index()\n hb_output_file = 'NHTS_home_based_trips_' + st + '.csv'\n home_based_trips_agg.to_csv(os.path.join(result_dir, hb_output_file), index = False)\n\n # pairing home and non-home based trips to generate VMT fraction\n home_based_trips.loc[:, 'DistanceBinID'] = \\\n home_based_trips.loc[:, 'distanceBin'].str.split('_').str[0]\n \n home_var = ['dest_geotype', 'dest_microtype', 'DistanceBinID']\n home_based_trips.loc[:, 'VMT'] = home_based_trips.loc[:, 'trpmiles'] * home_based_trips.loc[:, 'wtperfin']\n home_trips = home_based_trips.groupby(home_var)[['VMT']].sum()\n home_trips.columns = ['home VMT']\n home_trips = home_trips.reset_index()\n # home_trips.head(5)\n \n nonhome_based_trips.loc[:, 'DistanceBinID'] = \\\n nonhome_based_trips.loc[:, 'distanceBin_home'].str.split('_').str[0]\n \n nonhome_var = ['o_geotype', 'o_microtype', 'DistanceBinID']\n nonhome_based_trips.loc[:, 'VMT'] = nonhome_based_trips.loc[:, 'trpmiles'] * nonhome_based_trips.loc[:, 'wtperfin']\n nonhome_trips = nonhome_based_trips.groupby(nonhome_var)[['VMT']].sum()\n nonhome_trips.columns = ['nonhome VMT']\n nonhome_trips = nonhome_trips.reset_index()\n # nonhome_trips.head(5)\n \n nonhome_fraction = pd.merge(home_trips, nonhome_trips,\n left_on = home_var, \n right_on = nonhome_var, how 
= 'left')\n\n nonhome_fraction = nonhome_fraction.loc[(nonhome_fraction['home VMT'] > 0) & (nonhome_fraction['nonhome VMT'] > 0)]\n print(nonhome_fraction[['home VMT', 'nonhome VMT']].sum())\n nonhome_fraction.loc[:, 'nhb_fraction_VMT'] = nonhome_fraction.loc[:, 'nonhome VMT'] / \\\n nonhome_fraction.loc[:, 'home VMT']\n\n # nonhome_fraction.head(5)\n sns.boxplot(data = nonhome_fraction, x = 'DistanceBinID', y = 'nhb_fraction_VMT', \n showfliers = False)\n plt.savefig(os.path.join(visual_dir, 'nhb_fraction_by_dist_bin.png'), dpi = 200)\n plt.show()\n nonhome_fraction = nonhome_fraction[['o_geotype', 'o_microtype', 'DistanceBinID', 'nhb_fraction_VMT']]\n nhb_output_file = 'NHTS_nonhome_VMT_fraction_' + st + '.csv'\n nonhome_fraction.to_csv(os.path.join(result_dir, nhb_output_file), index = False) \n # break\n\n","repo_name":"arielgatech/BILD_AQ_EV_VMT","sub_path":"scripts/input_generation/BILDAQ_NHTS_trip_generation_rate.py","file_name":"BILDAQ_NHTS_trip_generation_rate.py","file_ext":"py","file_size_in_byte":15916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42383813542","text":"#!/usr/bin/env python\n#\n# ELEC350 PSK31 Beacon Message Encoder Example\n#\n\nfrom struct import pack\nimport varicode\n\ndef myVaricodeEncodingFunction(message):\n\n ##################################################################\n # The following code is for illustration purposes only. This code\n # does not do the Varicode encoding but illustrates how you might\n # use the Varicode table provided.\n encoding_tbl = { 0 : '1010', 1 : '1011' }\n \n print(\"Encoding user message: \" + message[:-1])\n charList = [encoding_tbl[ord(char)%2] for char in list(message)]\n\n print(\"Encoded user message as \" + ''.join(charList))\n return ''.join(charList)\n # END OF EXAMPLE CODE\n ##################################################################\n\n\ndef myDifferentialEncodingFunction(message):\n \n ##################################################################\n # The following code is for illustration purposes only. This code\n # does not do the differential encoding, it only illustrates\n # how you might fill the array of floats using the input string.\n out = []\n\n for char in list(message):\n if (char == '0'):\n out.append(-1.0)\n else:\n out.append(1.0)\n \n print(\"Encoded \" + message + \" as \" + ' '.join([str(x) for x in out]))\n\n return out\n # END OF EXAMPLE CODE\n ##################################################################\n\n\nif __name__ == \"__main__\":\n\n # Get the user message string.\n userMsg = raw_input('Message String: ')\n\n # Call your own function to translate the \n # ASCII input characters to Varicode strings.\n varicodeMsg = myVaricodeEncodingFunction(userMsg + '\\n')\n\n # Call your own function to perform the differential \n # encoding on the varicode string.\n encodedMsg = myDifferentialEncodingFunction(varicodeMsg)\n\n # Write the output file\n filename = 'output.dat'\n print('Writing ' + filename + '...')\n\n try:\n outfile = open(filename, 'w')\n except:\n print('Could not open ' + filename + '.')\n exit(-1)\n\n outputStr = ''.join([pack('f', x) for x in encodedMsg])\n outfile.write(outputStr)\n outfile.close()\n\n print('Done.')\n\n# END OF CODE\n","repo_name":"mistic-lab/ece350","sub_path":"lab_manual/data/encode_example.py","file_name":"encode_example.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17518685506","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\n\r\n# Region glowing function\r\n# Perform region glowing in image input by specified seed_point(s).\r\n# The Predicate is the difference between seed_point and exploring point value is less than threshold_value.\r\n# Mark the point that passed predicate to the region.\r\n# Mark the point that not passed predicate and didn't explore it again.\r\n# Explore in 8-connected neighborhood of border point of in the region.\r\n\r\n\r\n\r\n# Region_glowing function \r\ndef region_glowing(seed_point,threshold_value):\r\n seed_point_mean = (np.sum(img_data[seed_point[1],seed_point[0],:3]))/3 # Calucate mean of seed point\r\n print('seed_point', seed_point[0], seed_point[1],': ',img_data[seed_point[1],seed_point[0]])\r\n\r\n tmp = np.zeros((weight,height)) # Create array for marked that point expolred or not\r\n # 0 = not explore, -1 = explored\r\n\r\n img_data[seed_point[1],seed_point[0]] = [255,0,0,255] # Set seed point to region\r\n tmp[seed_point[0],seed_point[1]] = -1 # Mark seed point that is explored \r\n broaders = [(seed_point[0],seed_point[1])] # Create arrray for keep broaders pixel of region\r\n\r\n # loop untill can't find the point to explore\r\n loop_count = 0 \r\n while(len(broaders)>=1):\r\n broaders_tmp = broaders\r\n for broader in broaders_tmp:\r\n x,y = broader[0],broader[1]\r\n tmp[x,y] = 1\r\n broaders.remove((x,y)) # remove explored from broaders\r\n # 8-connected neighborhood of border point\r\n neighbor = [(x-1,y-1),(x,y-1),(x+1,y-1),\r\n (x-1,y), (x+1,y),\r\n (x-1,y+1),(x,y+1),(x+1,y+1)\r\n ]\r\n # loop check predicate of neighborhood\r\n for pixel in neighbor:\r\n if (tmp[pixel[0],pixel[1]] == 0):\r\n dist = (np.sum(img_data[pixel[1],pixel[0],:3]))/3\r\n tmp[pixel[0],pixel[1]] = -1\r\n if abs(dist- seed_point_mean) <= threshold_value:\r\n img_data[pixel[1],pixel[0]] = [255,0,0,255]\r\n broaders.append((pixel[0],pixel[1]))\r\n \r\n loop_count += 1\r\n\r\n# Load image to numpy array\r\nimg_data = np.array(Image.open('brain.png'))\r\nplt.subplot(1, 2, 1)\r\nplt.imshow(img_data)\r\nplt.title('Test Image')\r\n\r\n# Set seed point\r\nseed_point_1 = (188, 226)\r\nseed_point_2 = (301, 207)\r\n\r\n# Get height and weight\r\nheight, weight, shape = img_data.shape\r\n\r\nregion_glowing(seed_point_1,100)\r\nregion_glowing(seed_point_2,50)\r\n\r\n# Showing output image \r\nplt.subplot(1, 2, 2)\r\nplt.imshow(img_data)\r\nplt.title('Output Image')\r\nplt.show()\r\n\r\n\r\n","repo_name":"piyawat-at/Region_Growing","sub_path":"region_growing.py","file_name":"region_growing.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26379930173","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets\n\nfrom results.serializers import teacher\nfrom results.serializers.class_room import MiniClassRoomSerializer\nfrom ..serializers import ClassRoomSerializer\nfrom ..models import ClassRoom, Subject, PaperAllocation\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import action, api_view\n\n\nclass ClassRoomViewSet(viewsets.ModelViewSet):\n queryset = ClassRoom.objects.all()\n serializer_class = ClassRoomSerializer\n\n def get_queryset(self):\n queryset = super().get_queryset()\n teacher_pk = self.kwargs.get('teacher_pk')\n level_pk = self.kwargs.get('level_pk')\n if teacher_pk:\n queryset = queryset.filter(teacher=teacher_pk)\n if level_pk:\n queryset = queryset.filter(teacher=level_pk)\n params = self.request.query_params\n if params:\n queryset = queryset.filter(**params.dict())\n return queryset\n\n @action(detail=False, methods=['GET'], name='get_count', url_path='count')\n def get_count(self, request, *args, **kwargs):\n params = self.request.query_params\n queryset = super().get_queryset()\n if params:\n queryset = queryset.filter(**params.dict())\n count = queryset.count()\n return Response({'count':count})\n\n\n@api_view(['GET'])\ndef get_teacher_allocated_class_rooms(request, teacher_pk):\n class_room_ids = [class_room_paper.class_room_id for class_room_paper in PaperAllocation.objects.filter(teacher=teacher_pk)]\n queryset = ClassRoom.objects.filter(id__in=class_room_ids)\n params = request.GET\n if params:\n queryset = queryset.filter(**params.dict())\n serializer = ClassRoomSerializer(queryset.all(), many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef get_teacher_allocated_class_room(request, teacher_pk, class_room_pk):\n class_room_ids = [class_room_paper.class_room_id for class_room_paper in PaperAllocation.objects.filter(teacher=teacher_pk)]\n queryset = ClassRoom.objects.filter(id__in=class_room_ids)\n params = request.GET\n if params:\n queryset = queryset.filter(**params.dict())\n class_room = get_object_or_404(queryset, pk=class_room_pk)\n serializer = ClassRoomSerializer(class_room)\n return Response(serializer.data)\n","repo_name":"samuelitwaru/wex-erp","sub_path":"results/views/class_room.py","file_name":"class_room.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35424234725","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 31 12:23:54 2018\r\n\r\n@author: ides13\r\n\"\"\"\r\nimport urllib.parse\r\n#import urllib\r\nimport pandas as pd\r\nimport sys\r\nimport webbrowser\r\n\r\n\r\ndef naeredu():\r\n rooturl='http://terms.naer.edu.tw/search/?q={}&field=ti&op=AND&match=&group=&num=30'\r\n url = ''\r\n \r\n while True:\r\n word = input(\"在【學術名詞】查詢:\")\r\n if word==\"q\":\r\n# sys.exit()\r\n break\r\n elif word ==\"openurl\":\r\n webbrowser.open(url) \r\n else:\r\n word = urllib.parse.quote(word, safe='') \r\n \t\t#如上,中文網址需要編碼,例如try and error的英文“詞”也需要。\r\n url = rooturl.format(word)\r\n #print (url)\r\n try:\r\n myhtml = pd.read_html(url)\r\n # print (\"1111111111111\")\r\n table = myhtml[0]\r\n table = table.drop(['INFO', '全選'], axis=1)\r\n print (table)\r\n \r\n print('\\n')\r\n except (IndexError, ValueError):\r\n print(\"在學術名詞網中,找不到這個詞,請重新輸入\")\r\n return \r\n \r\nif __name__ == \"__main__\":\r\n naeredu() \r\n","repo_name":"ides13/naeredu","sub_path":"NaerEduFunc.py","file_name":"NaerEduFunc.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23930935345","text":"import json\nimport requests\nfrom random import *\n\n## set defaults\npractice_file = 'simple_words.txt'\nwordset_file = 'hard_words.txt'\nwords_to_ask = 5\ndefault_timeout = 1\nfalse_word_min = 4\nfalse_word_max = 10\nmax_wordset_size = 100\nmorse_frequency = 800\ndit_duration = 80\nsound_on = True\n\n\ndef save_wordset(w_list, d_list):\n '''\n Save fetched words in the wordset for future use\n '''\n\n try:\n with open(wordset_file) as h_words:\n wl = json.load(h_words)\n except FileNotFoundError:\n wl = []\n\n if wl:\n shuffle(wl)\n while len(wl) > (max_wordset_size - words_to_ask):\n wl.pop()\n\n for k in range(len(w_list)):\n t = [w_list[k], d_list[k]]\n wl.append(t)\n\n try:\n with open(wordset_file, 'w') as h_words:\n json.dump(wl, h_words)\n except OSError:\n print('Some IO error occured, wordset not saved')\n return 0\n\n return 1\n\ndef fetch_from_simple():\n '''\n Fetches a list of words from provided file simple_file\n (each word on its own line).\n '''\n \n words = []\n\n try:\n with open(practice_file) as s_words:\n for line in s_words.readlines():\n words.append(line.strip())\n except FileNotFoundError:\n print('Local practice wordset not found.')\n print(f\"Create a txt file '{practice_file}' and fill it with at least\"\n f\" {words_to_ask} words, each in a new line.\")\n return 0\n shuffle(words) \n return words[0 : words_to_ask], 0\n\ndef fetch_from_hard():\n '''\n Fetches a list of words and definitions from provided file hard_file\n (list of lists, generated by fetch_from_net).\n '''\n \n words = []\n definitions = []\n\n try:\n with open(wordset_file) as h_words:\n wl = json.load(h_words)\n except FileNotFoundError:\n print('Local wordset not found.')\n print(\"Create it by running the 'Fetch from the Web' option.\")\n return 0, 0\n \n if len(wl) < words_to_ask:\n print('Local wordset is too small!')\n print(\"Expand it by running the 'Fetch from Internet' option.\")\n return 0, 0\n \n shuffle(wl)\n for k in range(0, words_to_ask):\n words.append(wl[k][0])\n definitions.append(wl[k][1])\n\n return words, definitions\n\ndef fetch_from_net():\n '''\n Fetches a list of words and definitions from the web\n '''\n \n words = []\n definitions = []\n url = \"https://random-words-api.vercel.app/word\"\n print('Fetching a wordset from the web ', end = '')\n \n for k in range(words_to_ask):\n test_result = 'cát'\n\n ## test for non-ASCII characters\n while not test_result.isascii():\n try:\n response = requests.get(url, timeout = default_timeout)\n except requests.exceptions.Timeout:\n print('\\nCommunication timeout.\\nPlease check your connection and'\n ' try again or adjust the default_timeout setting.')\n return 0, 0\n except requests.RequestException:\n print('\\nAn error occured while trying to fetch a wordset.')\n return 0, 0\n \n data = response.json()\n test_result = data[0][\"word\"]\n \n words.append(data[0][\"word\"])\n definitions.append(data[0][\"definition\"])\n print('.', end='')\n\n print('\\n')\n save_wordset(words, definitions)\n return words, definitions\n\ndef false_words():\n '''\n Lazy random word generator. 
For great justice!\n '''\n \n vowels = ['a', 'e', 'i', 'o', 'u']\n consonants = ['b', 'c', 'd', 'f', 'g', 'h', 'j', 'k', 'l',\n 'm', 'n', 'p', 'q', 'r', 's', 't', 'v', 'w', 'x', 'y', 'z']\n words = []\n\n for _ in range(words_to_ask):\n was_consonant = 0\n was_vowel = 0\n s = ''\n word_len = randint(false_word_min, false_word_max)\n\n while len(s) < word_len:\n if randint(0,1) and not randint(0, was_vowel):\n l = vowels[randint(0, len(vowels)-1)]\n was_vowel += 1\n was_consonant = 0\n \n elif not randint(0, was_consonant):\n l = consonants[randint(0, len(consonants)-1)]\n was_vowel = 0\n was_consonant += 1\n\n else: l = ''\n s += l\n\n words.append(s)\n\n return words, []\n\ndef get_codes(morse_codes_file):\n '''Get Morse codes from file defined as morse_codes_file'''\n\n try:\n with open(morse_codes_file) as codes:\n mc = json.load(codes)\n except FileNotFoundError:\n print(f\"File '{morse_codes_file}' not found.\")\n return 0\n\n ## need this for multi-word sets, if any\n mc[' '] = '\\t'\n return mc\n\n##print(fetch_from_simple())\n##print(fetch_from_hard())\n##a, b = fetch_from_net()\n##print(a, b)\n##print(false_words())\n##\n##if a: save_wordset(a, b)\n","repo_name":"Ontaelio/Morse-Guesser","sub_path":"mc_wordsets.py","file_name":"mc_wordsets.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36008583484","text":"from django import forms\r\nfrom django.forms import ModelForm\r\nfrom parts.models import Part\r\nfrom vendors.models import Vendor\r\nfrom django_select2 import *\r\nfrom inventory.models import InventoryCount, InventoryLocation\r\n \r\nclass PartChoice(AutoModelSelect2Field):\r\n queryset = Part.objects\r\n search_fields = ['part_number__istartswith',]\r\n \r\nclass LocationChoice(AutoModelSelect2Field):\r\n queryset = InventoryLocation.objects\r\n search_fields = ['location_code__istartswith',]\r\n \r\nclass InventoryForm(forms.Form):\r\n part = forms.CharField()\r\n location = forms.CharField()\r\n inventory_count = forms.IntegerField()\r\n location_complete = forms.BooleanField(required=False)\r\n \r\nclass DirectToLocationForm(forms.Form):\r\n location = LocationChoice()\r\n \r\nclass DirectToPartForm(forms.Form):\r\n part = PartChoice()\r\n","repo_name":"disflux/django-mtr","sub_path":"inventory/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"}
+{"seq_id":"35312274190","text":"# -*- coding: utf-8 -*-\n# © 2017 Comunitea\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom openerp import models, fields, api\n\nfrom openerp.osv import fields as fields2, osv\n\n\nclass SaleOrder(models.Model):\n\n _inherit = 'sale.order'\n\n expedition_date = fields.Date('Expedition Date', required=True)\n\n @api.multi\n def fix_sale_lines_price(self):\n # Correcciones en pedidos 11/05/17\n solist = ['SO335', 'SO333', 'SO300', 'SO287', 'SO191', 'SO336',\n 'SO267', 'SO344', 'SO200']\n for order in self.search([('name', 'in', solist)]):\n for line in order.order_line:\n if line.invoice_lines:\n if line.price_unit != line.invoice_lines[0].price_unit or \\\n order.name == 'SO200':\n line.price_unit = line.invoice_lines[0].price_unit\n if line.discount1 != line.invoice_lines[0].discount1 or \\\n order.name == 'SO200':\n line.discount1 = line.invoice_lines[0].discount1\n if line.discount2 != line.invoice_lines[0].discount2 or \\\n order.name == 'SO200':\n line.discount2 = line.invoice_lines[0].discount2\n if line.discount3 != line.invoice_lines[0].discount3 or \\\n order.name == 'SO200':\n line.discount3 = line.invoice_lines[0].discount3\n if line.discount4 != line.invoice_lines[0].discount4 or \\\n order.name == 'SO200':\n line.discount4 = line.invoice_lines[0].discount4\n self.env['sale.order.line'].browse(193).price_unit = 25.0\n self.env['sale.order'].browse(271).order_line.write({'price_unit': 0})\n self.env['sale.order.line'].browse(531).write({'price_unit': 19.2000,\n 'discount1': 28.33})\n\n\nclass SaleOrderDatesOldApi(osv.osv):\n \"\"\"Overwrited old api\"\"\"\n _inherit = 'sale.order'\n\n def _get_effective_date(self, cr, uid, ids, name, arg, context=None):\n \"\"\"OVERWRITED, only pickings processed\"\"\"\n res = {}\n dates_list = []\n for order in self.browse(cr, uid, ids, context=context):\n dates_list = []\n for pick in order.picking_ids:\n if pick.state == 'done':\n dates_list.append(pick.date)\n if dates_list:\n res[order.id] = min(dates_list)\n else:\n res[order.id] = False\n return res\n\n _columns = {\n 'effective_date': fields2.function(\n _get_effective_date, type='date',\n store=True, string='Effective Date',\n help=\"Date on which the first Delivery Order was created.\")\n }\n\n\nclass SaleReport(models.Model):\n\n _inherit = 'sale.report'\n\n expedition_date = fields.Date('Expedition Date', readonly=True)\n\n def _select(self):\n select_str = \"\"\"\n WITH currency_rate (currency_id, rate, date_start, date_end) AS (\n SELECT r.currency_id, r.rate, r.name AS date_start,\n (SELECT name FROM res_currency_rate r2\n WHERE r2.name > r.name AND\n r2.currency_id = r.currency_id\n ORDER BY r2.name ASC\n LIMIT 1) AS date_end\n FROM res_currency_rate r\n )\n SELECT min(l.id) as id,\n l.product_id as product_id,\n t.uom_id as product_uom,\n sum(l.product_uom_qty / u.factor * u2.factor) as product_uom_qty,\n sum(l.product_uom_qty * l.price_unit / cr.rate * (100.0-l.discount) / 100.0) as price_total,\n count(*) as nbr,\n s.date_order as date,\n s.date_confirm as date_confirm,\n s.expedition_date as expedition_date,\n s.partner_id as partner_id,\n s.user_id as user_id,\n s.company_id as company_id,\n extract(epoch from avg(date_trunc('day',s.date_confirm)-date_trunc('day',s.create_date)))/(24*60*60)::decimal(16,2) as delay,\n l.state,\n t.categ_id as categ_id,\n s.pricelist_id as pricelist_id,\n s.project_id as analytic_account_id,\n s.section_id as section_id\n \"\"\"\n return select_str\n\n def _group_by(self):\n 
group_by_str = \"\"\"\n GROUP BY l.product_id,\n l.order_id,\n t.uom_id,\n t.categ_id,\n s.date_order,\n s.date_confirm,\n s.partner_id,\n s.user_id,\n s.company_id,\n l.state,\n s.pricelist_id,\n s.project_id,\n s.section_id,\n s.expedition_date\n \"\"\"\n return group_by_str","repo_name":"Comunitea/CMNT_00074_2016_UM","sub_path":"project-addons/sale_custom/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41043628535","text":"# Syed Aftaabuddin\r\n# Project 3a\r\n# May 5, 2020\r\n\r\nfrom datetime import datetime\r\nclass Vehicle:\r\n \r\n\r\n def __init__(self, vin, year, color, owner):\r\n \r\n #coverted to uppercase\r\n vin = vin.upper( )\r\n \r\n #if year greater than current_year\r\n current_year = datetime.now( ).year\r\n if year > current_year:\r\n year = current_year\r\n \r\n #removing all special characters from vin\r\n alphanumeric_filter = filter(str.isalnum, vin)\r\n self.vin = \"\".join(alphanumeric_filter)\r\n self.year = year\r\n self.color = color\r\n self.owner = owner\r\n self._sales_tax_paid = False\r\n \r\n\r\n def is_sales_tax_paid(self):\r\n if self._sales_tax_paid:\r\n return \"Yes\"\r\n else:\r\n return \"No\"\r\n \r\n def pay_sales_tax(self):\r\n self._sales_tax_paid = True\r\n\r\n\r\n def __str__(self):\r\n ret_val = f\"Vin: {self.vin}\\n\" + \\\r\n f\"Year: {self.year}\\n\" + \\\r\n f\"color: {self.color}\\n\" + \\\r\n f\"Owner: {self.owner}\\n\" + \\\r\n f\"Is Sales Tax Paid: {self.is_sales_tax_paid( )}\\n\"\r\n return ret_val\r\n\r\n\r\n def __repr__(self):\r\n return str(self)\r\n\r\n\r\n\r\n","repo_name":"saftaabu/IT-212-Applied-OO-Programming-","sub_path":"Projects/vehicle.py","file_name":"vehicle.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11484197740","text":"from django.conf.urls import patterns, include, url\nfrom views import index, event_by_id, beer_rating, event_stats, event_list, beer_stats, beer_overall, user_ratings, beer_list\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'loevdalnet.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^$', index, name=\"index\"),\n url(r'^event/(?P[0-9]+)/$', event_by_id, name=\"event_by_id\"),\n url(r'^event/(?P[0-9]+)/beer/(?P
[a-z0-9]+)/$', beer_rating, name=\"beer_rating\"),\n url(r'^event/(?P[0-9]+)/results/$', event_stats, name=\"event_stats\"),\n url(r'^event/(?P[0-9]+)/admin/$', event_list, name=\"event_list\"),\n url(r'^stats/beer/(?P[0-9a-z]+)/$', beer_stats, name=\"beer_stats\"),\n url(r'^stats/beers/$', beer_list, name=\"beer_list\"),\n url(r'^stats/top10/$', beer_overall, name=\"beer_overall\"),\n url(r'^user/(?P[0-9]+)/ratings/$', user_ratings, name=\"user_ratings\"),\n \n #url(r'^newpost/$', newpost),\n #url(r'^posts/(?P[-a-zA-Z0-9]+)/$', post_by_slug, name='post_by_slug'),\n)\n","repo_name":"hakloev/old-hakloevno","sub_path":"apps/beertasting/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37138071334","text":"# it only reads one line at a time. When the next line is read, the previous one will be garbage collected unless you have stored a reference to it somewhere else \r\n\r\nwith open(\"log.txt\") as infile:\r\n for line in infile:\r\n do_something_with(line)\r\n\r\n# ##############\t\t\r\nimport sys\r\n\r\nwith open(sys.argv[2], 'w') as outfile:\r\n with open(sys.argv[1]) as infile:\r\n for line in infile:\r\n outfile.write(line)\r\n\r\n# ##############\r\n\t\t\r\n# alternate\r\nchunk = infile.read(chunksize)\r\n# read lines within chunck\r\n\r\n# ##############\r\n# with pandas\r\n\r\ndf = pd.read_csv('matrix.txt',sep=',', header = None, skiprows= 1000, nrows=1000)","repo_name":"Wisecracks/Python-dabbles","sub_path":"ReadFile.py","file_name":"ReadFile.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39395575867","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# # 1\n\n# In[3]:\n\n\ndf = pd.read_csv('Wine_completed.csv')\ndf\n\n\n# # 2\n\n# In[5]:\n\n\ndf.head(10)\n\n\n# In[6]:\n\n\ndf.tail(10)\n\n\n# In[7]:\n\n\ndf.sample(10)\n\n\n# # 3\n\n# In[8]:\n\n\ndf.info()\n\n\n# In[9]:\n\n\ndf.describe()\n\n\n# # 4\n\n# In[10]:\n\n\nsns.pairplot(df)\n\n\n# # 5\n\n# In[11]:\n\n\ndf.corr()\n\n\n# # 6\n\n# In[12]:\n\n\nfig = plt.figure(figsize=(12,8))\nsns.heatmap(df.corr(), annot=True, cmap='coolwarm', linecolor='white', linewidth=2)\n\n\n# # 7\n\n# In[13]:\n\n\nplt.title('Best correlation')\nplt.xlabel('Flavanoids')\nplt.ylabel('OD280/OD315 of diluted wines')\nplt.scatter(df['Flavanoids'], df['OD280/OD315 of diluted wines'])\n\n\n# # 8\n\n# In[14]:\n\n\nplt.title('Worst correlation')\nplt.xlabel('Malic acid')\nplt.ylabel('Hue')\nplt.scatter(df['Malic acid'], df['Hue'])\n\n\n# # 9\n\n# In[15]:\n\n\nplt.title('Nearest zero correlation')\nplt.xlabel('Ash')\nplt.ylabel('OD280/OD315 of diluted wines')\nplt.scatter(df['Ash'], df['OD280/OD315 of diluted wines'])\n\n\n# # 10\n\n# In[23]:\n\n\nfig, [[ax1, ax2, ax3],[ ax4, ax5, ax6],[ax7, ax8, ax9]] = plt.subplots(3, 3, figsize=[12,10])\n \nsns.boxplot(df['Alcohol'], orient='v', ax=ax1)\nsns.boxplot(df['Malic acid'], orient='v', ax=ax2)\nsns.boxplot(df['Ash'], orient='v', ax=ax3)\nsns.boxplot(df['Alcalinity of ash'], orient='v', ax=ax4)\nsns.boxplot(df['Magnesium'], orient='v', ax=ax5)\nsns.boxplot(df['Total penols'], orient='v', ax=ax6)\nsns.boxplot(df['Flavanoids'], orient='v', ax=ax7)\nsns.boxplot(df['Nonflavanoids penols'], orient='v', ax=ax8)\nsns.boxplot(df['Proanthocyanins'], orient='v', ax=ax9)\n\nfig.tight_layout()\n\n\n# # 11\n\n# In[24]:\n\n\nfig, [[ax1, ax2],[ ax3, ax4]] = plt.subplots(2, 2, figsize=[12,10])\n \nsns.boxplot(df['Color intensity'], orient='v', ax=ax1)\nsns.boxplot(df['Hue'], orient='v', ax=ax2)\nsns.boxplot(df['OD280/OD315 of diluted wines'], orient='v', ax=ax3)\nsns.boxplot(df['Proline'], orient='v', ax=ax4)\n\nfig.tight_layout()\n\n\n# # 12 + 13 ไม่จำเป็นต้องจัดการ Outlier และไม่มีข้อมูลที่หายไป\n\n# # 14\n\n# In[27]:\n\n\n#มีผลลัพธ์อยู่ใน df แล้ว --> df['Class']\n\ndf\n\n\n# In[30]:\n\n\n#ย้ายแต่ย้ายไปอยู่ column ที่ 14\n\ndf = df[[c for c in df if c not in ['Class']] + ['Class']]\n\ndf\n\n\n# # 15\n\n# In[31]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[32]:\n\n\nX = df.drop(['Class'], axis = 1)\nX\n\n\n# In[33]:\n\n\ny = df['Class']\ny\n\n\n# In[34]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=100)\n\n\n# In[35]:\n\n\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\n\n\n# # 16\n\n# In[36]:\n\n\nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[37]:\n\n\nsc = StandardScaler()\n\n\n# In[38]:\n\n\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n\n# # 17 Baseline Support Vector Machine\n\n# In[40]:\n\n\nfrom sklearn.svm import SVC\n\n\n# In[41]:\n\n\nsvc = SVC()\n\n\n# In[42]:\n\n\nsvc.fit(X_train, y_train)\n\n\n# In[44]:\n\n\nsvm_predicted = svc.predict(X_test)\nsvm_predicted\n\n\n# # 18\n\n# In[46]:\n\n\nfig = plt.figure(figsize=(8,6))\nsns.countplot(svm_predicted)\n\n\n# # 19\n\n# In[47]:\n\n\nfrom sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score\n\n\n# 
In[48]:\n\n\nconfusion_matrix(y_test,svm_predicted)\n\n\n# # 20\n\n# In[50]:\n\n\nprint('Accuracy = ', accuracy_score(y_test,svm_predicted))\nprint('F1-score = ', f1_score(y_test,svm_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,svm_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,svm_predicted, average='micro'))\n\n\n# In[51]:\n\n\nprint('Accuracy = ', accuracy_score(y_test,svm_predicted))\nprint('F1-score = ', f1_score(y_test,svm_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,svm_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test,svm_predicted, average='macro'))\n\n\n# # 21 Hyperparameter tuning\n\n# In[52]:\n\n\nfrom sklearn.model_selection import GridSearchCV\n\n\n# In[53]:\n\n\nparam_combination = {'C':[0.01,0.1,1,10,100,1000,10000], 'gamma':[0.00001,0.0001,0.001,0.01,0.1,1,10]}\n\n\n# In[54]:\n\n\ngrid_search = GridSearchCV(SVC(), param_combination, verbose=3)\n\n\n# In[55]:\n\n\ngrid_search.fit(X_train, y_train)\n\n\n# In[56]:\n\n\ngrid_search.best_params_\n\n\n# In[57]:\n\n\ngrid_predicted = grid_search.predict(X_test)\ngrid_predicted\n\n\n# # 22\n\n# In[58]:\n\n\nconfusion_matrix(y_test,grid_predicted)\n\n\n# # 23\n\n# In[71]:\n\n\nprint('SVM Baseline result(micro) + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,grid_predicted))\nprint('F1-score = ', f1_score(y_test,grid_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,grid_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,grid_predicted, average='micro'))\n\n\n# In[72]:\n\n\nprint('SVM Baseline result(macro) + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,grid_predicted))\nprint('F1-score = ', f1_score(y_test,grid_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,grid_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test,grid_predicted, average='macro'))\n\n\n# # 24 Random Forest Baseline\n\n# In[61]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# In[62]:\n\n\nrf = RandomForestClassifier()\nrf.fit(X_train, y_train)\n\n\n# In[63]:\n\n\nrf_predicted = rf.predict(X_test)\n\n\n# # 25\n\n# In[64]:\n\n\nfig = plt.figure(figsize=(8,6))\nsns.countplot(rf_predicted)\n\n\n# # 26\n\n# In[65]:\n\n\nconfusion_matrix(y_test,rf_predicted)\n\n\n# # 27\n\n# In[66]:\n\n\nprint('Random Forest Baseline result(micro)')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_predicted))\nprint('F1-score = ', f1_score(y_test,rf_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,rf_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,rf_predicted, average='micro'))\n\n\n# In[67]:\n\n\nprint('Random Forest Baseline result(macro)')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_predicted))\nprint('F1-score = ', f1_score(y_test,rf_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,rf_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test,rf_predicted, average='macro'))\n\n\n# # 28 RF + Hyperparameter tuning\n\n# In[68]:\n\n\nparam_grid = {'max_depth':[4,8,16,None],\n 'max_features':[4,8],\n 'n_estimators':[50,100,200,500],\n 'min_samples_split':[3,5,6,7]}\n\n\n# In[69]:\n\n\ngrid_search = GridSearchCV(RandomForestClassifier(), param_grid, verbose=3)\n\n\n# In[70]:\n\n\ngrid_search.fit(X_train, y_train)\n\n\n# In[73]:\n\n\ngrid_search.best_params_\n\n\n# In[74]:\n\n\ngrid_search.best_estimator_\n\n\n# In[76]:\n\n\nrf_grid_predicted = grid_search.predict(X_test)\n\n\n# # 29\n\n# 
In[77]:\n\n\nconfusion_matrix(y_test,rf_grid_predicted)\n\n\n# In[ ]:\n\n\n#30\n\n\n# In[78]:\n\n\nprint('Random Forest Baseline result(micro) + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_grid_predicted))\nprint('F1-score = ', f1_score(y_test,rf_grid_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,rf_grid_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,rf_grid_predicted, average='micro'))\n\n\n# In[79]:\n\n\nprint('Random Forest Baseline result(macro) + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_grid_predicted))\nprint('F1-score = ', f1_score(y_test,rf_grid_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,rf_grid_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test,rf_grid_predicted, average='macro'))\n\n\n# # 31\n\n# In[80]:\n\n\nfig, [[ax1, ax2, ax3],[ ax4, ax5, ax6],[ax7, ax8, ax9]] = plt.subplots(3, 3, figsize=[12,10])\n \nsns.distplot(df['Alcohol'], ax=ax1)\nsns.distplot(df['Malic acid'], ax=ax2)\nsns.distplot(df['Ash'], ax=ax3)\nsns.distplot(df['Alcalinity of ash'], ax=ax4)\nsns.distplot(df['Magnesium'], ax=ax5)\nsns.distplot(df['Total penols'], ax=ax6)\nsns.distplot(df['Flavanoids'], ax=ax7)\nsns.distplot(df['Nonflavanoids penols'], ax=ax8)\nsns.distplot(df['Proanthocyanins'], ax=ax9)\n\nfig.tight_layout()\n\n\n# # 32\n\n# In[81]:\n\n\nfig, [[ax1, ax2],[ ax3, ax4]] = plt.subplots(2, 2, figsize=[12,10])\n \nsns.distplot(df['Color intensity'], ax=ax1)\nsns.distplot(df['Hue'], ax=ax2)\nsns.distplot(df['OD280/OD315 of diluted wines'], ax=ax3)\nsns.distplot(df['Proline'], ax=ax4)\n\nfig.tight_layout()\n\n\n# # 33\n\n# In[82]:\n\n\n# Alcalinity of ash ใกล้เคียง Normal distribution\n\n\n# # 34 LDA\n\n# In[83]:\n\n\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n\n\n# In[84]:\n\n\nlda = LDA(n_components=2)\n\n\n# # 35\n\n# In[85]:\n\n\nX_train_lda = lda.fit_transform(X_train, y_train)\nX_test_lda = lda.transform(X_test)\n\n\n# In[86]:\n\n\nX_train_lda\n\n\n# # 36\n\n# In[87]:\n\n\nfig = plt.figure(figsize=(12,8))\nplt.scatter(X_train_lda[:,0],X_train_lda[:,1],c=y_train, cmap='coolwarm')\nplt.xlabel('LDA 1')\nplt.ylabel('LDA 2')\nplt.grid()\n\n\n# # 37 Support Vector Machine + LDA\n\n# In[88]:\n\n\nsvc.fit(X_train_lda, y_train)\n\n\n# In[89]:\n\n\nsvm_lda_predicted = svc.predict(X_test_lda)\nsvm_lda_predicted\n\n\n# # 38\n\n# In[90]:\n\n\nfig = plt.figure(figsize=(8,6))\nsns.countplot(svm_lda_predicted)\n\n\n# # 39\n\n# In[91]:\n\n\nconfusion_matrix(y_test,svm_lda_predicted)\n\n\n# # 40\n\n# In[92]:\n\n\nprint('SVM result(micro) + LDA')\n\nprint('Accuracy = ', accuracy_score(y_test,svm_lda_predicted))\nprint('F1-score = ', f1_score(y_test,svm_lda_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,svm_lda_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,svm_lda_predicted, average='micro'))\n\n\n# In[93]:\n\n\nprint('SVM result(micro) + LDA')\n\nprint('Accuracy = ', accuracy_score(y_test,svm_lda_predicted))\nprint('F1-score = ', f1_score(y_test,svm_lda_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,svm_lda_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test,svm_lda_predicted, average='macro'))\n\n\n# # 41 Support Vector Machine + LDA + HP\n\n# In[94]:\n\n\nparam_combination = {'C':[0.01,0.1,1,10,100,1000,10000], 'gamma':[0.00001,0.0001,0.001,0.01,0.1,1,10]}\n\n\n# In[95]:\n\n\ngrid_search = GridSearchCV(SVC(), param_combination, verbose=3)\n\n\n# 
In[96]:\n\n\ngrid_search.fit(X_train_lda, y_train)\n\n\n# In[97]:\n\n\ngrid_search.best_params_\n\n\n# In[98]:\n\n\ngrid_lda_predicted = grid_search.predict(X_test_lda)\ngrid_lda_predicted\n\n\n# # 42\n\n# In[99]:\n\n\nconfusion_matrix(y_test,grid_lda_predicted)\n\n\n# # 43\n\n# In[100]:\n\n\nprint('SVM result(micro) + LDA + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,grid_lda_predicted))\nprint('F1-score = ', f1_score(y_test,grid_lda_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,grid_lda_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,grid_lda_predicted, average='micro'))\n\n\n# In[101]:\n\n\nprint('SVM result(micro) + LDA + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,grid_lda_predicted))\nprint('F1-score = ', f1_score(y_test,grid_lda_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,grid_lda_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test,grid_lda_predicted, average='macro'))\n\n\n# # 44 RF + LDA\n\n# In[102]:\n\n\nrf.fit(X_train_lda, y_train)\n\n\n# In[103]:\n\n\nrf_lda_predicted = rf.predict(X_test_lda)\n\n\n# # 45\n\n# In[104]:\n\n\nfig = plt.figure(figsize=(8,6))\nsns.countplot(rf_lda_predicted)\n\n\n# # 46\n\n# In[105]:\n\n\nconfusion_matrix(y_test,rf_lda_predicted)\n\n\n# # 47\n\n# In[106]:\n\n\nprint('RF result(micro) + LDA')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_lda_predicted))\nprint('F1-score = ', f1_score(y_test,rf_lda_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,rf_lda_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,rf_lda_predicted, average='micro'))\n\n\n# In[107]:\n\n\nprint('RF result(micro) + LDA')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_lda_predicted))\nprint('F1-score = ', f1_score(y_test,rf_lda_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,rf_lda_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test,rf_lda_predicted, average='macro'))\n\n\n# # 48 RF + LDA + HP\n\n# In[111]:\n\n\nparam_grid = {'max_depth':[4,8,16,None],\n 'n_estimators':[50,100,200,500],\n 'min_samples_split':[3,5,6,7]}\n\n\n# In[112]:\n\n\ngrid_search = GridSearchCV(RandomForestClassifier(), param_grid, verbose=3)\n\n\n# In[113]:\n\n\ngrid_search.fit(X_train_lda, y_train)\n\n\n# In[114]:\n\n\ngrid_search.best_params_\n\n\n# In[115]:\n\n\ngrid_search.best_estimator_\n\n\n# In[116]:\n\n\nrf_lda_grid_predicted = grid_search.predict(X_test_lda)\n\n\n# # 49\n\n# In[117]:\n\n\nconfusion_matrix(y_test,rf_lda_grid_predicted)\n\n\n# # 50\n\n# In[118]:\n\n\nprint('RF result(micro) + LDA + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_lda_grid_predicted))\nprint('F1-score = ', f1_score(y_test,rf_lda_grid_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,rf_lda_grid_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,rf_lda_grid_predicted, average='micro'))\n\n\n# In[119]:\n\n\nprint('RF result(micro) + LDA + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_lda_grid_predicted))\nprint('F1-score = ', f1_score(y_test,rf_lda_grid_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,rf_lda_grid_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test,rf_lda_grid_predicted, average='macro'))\n\n\n# # 51 PCA\n\n# In[120]:\n\n\nfrom sklearn.decomposition import PCA\n\n\n# In[121]:\n\n\npca = PCA(n_components=2)\n\n\n# # 52\n\n# In[122]:\n\n\nX_train_pca = pca.fit_transform(X_train)\nX_test_pca = 
pca.transform(X_test)\n\n\n# In[123]:\n\n\nprint(X_train_pca.shape)\nprint(X_test_pca.shape)\n\n\n# # 52.1\n\n# In[124]:\n\n\npca.components_\n\n\n# In[126]:\n\n\nX.columns\n\n\n# In[127]:\n\n\ndf_pca = pd.DataFrame(pca.components_, columns=X.columns)\ndf_pca\n\n\n# # 52.2\n\n# In[130]:\n\n\nfig = plt.figure(figsize=(12,8))\nsns.heatmap(df_pca)\n\n\n# # 53\n\n# In[131]:\n\n\nfig = plt.figure(figsize=(12,8))\nplt.scatter(X_train_pca[:,0],X_train_pca[:,1],c=y_train, cmap='coolwarm')\nplt.xlabel('First principle component')\nplt.ylabel('Second principle component')\nplt.grid()\n\n\n# # 54 Support Vector Machine + PCA\n\n# In[132]:\n\n\nsvc.fit(X_train_pca, y_train)\n\n\n# In[133]:\n\n\nsvm_pca_predicted = svc.predict(X_test_pca)\nsvm_pca_predicted\n\n\n# # 55\n\n# In[134]:\n\n\nfig = plt.figure(figsize=(8,6))\nsns.countplot(svm_pca_predicted)\n\n\n# # 56\n\n# In[135]:\n\n\nconfusion_matrix(y_test,svm_pca_predicted)\n\n\n# # 57\n\n# In[136]:\n\n\nprint('SVM result(micro) + PCA')\n\nprint('Accuracy = ', accuracy_score(y_test,svm_pca_predicted))\nprint('F1-score = ', f1_score(y_test,svm_pca_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,svm_pca_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,svm_pca_predicted, average='micro'))\n\n\n# In[138]:\n\n\nprint('SVM result(micro) + PCA')\n\nprint('Accuracy = ', accuracy_score(y_test,svm_pca_predicted))\nprint('F1-score = ', f1_score(y_test,svm_pca_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,svm_pca_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test,svm_pca_predicted, average='macro'))\n\n\n# # 58 Support Vector Machine + PCA +HP\n\n# In[139]:\n\n\nparam_combination = {'C':[0.01,0.1,1,10,100,1000,10000], 'gamma':[0.00001,0.0001,0.001,0.01,0.1,1,10]}\n\n\n# In[140]:\n\n\ngrid_search = GridSearchCV(SVC(), param_combination, verbose=3)\n\n\n# In[141]:\n\n\ngrid_search.fit(X_train_pca, y_train)\n\n\n# In[142]:\n\n\ngrid_search.best_params_\n\n\n# In[143]:\n\n\ngrid_search.best_estimator_\n\n\n# In[144]:\n\n\ngrid_pca_predicted = grid_search.predict(X_test_pca)\ngrid_pca_predicted\n\n\n# # 59\n\n# In[146]:\n\n\nconfusion_matrix(y_test,grid_pca_predicted)\n\n\n# # 60\n\n# In[147]:\n\n\nprint('SVM result(micro) + PCA + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,grid_pca_predicted))\nprint('F1-score = ', f1_score(y_test,grid_pca_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,grid_pca_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,grid_pca_predicted, average='micro'))\n\n\n# In[150]:\n\n\nprint('SVM result(micro) + PCA + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,grid_pca_predicted))\nprint('F1-score = ', f1_score(y_test,grid_pca_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,grid_pca_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test, grid_pca_predicted, average='macro'))\n\n\n# # 61 RF + PCA\n\n# In[155]:\n\n\nrf.fit(X_train_pca, y_train)\n\n\n# In[156]:\n\n\nrf_pca_predicted = rf.predict(X_test_pca)\n\n\n# # 62\n\n# In[157]:\n\n\nfig = plt.figure(figsize=(8,6))\nsns.countplot(rf_pca_predicted )\n\n\n# # 63\n\n# In[158]:\n\n\nconfusion_matrix(y_test,rf_pca_predicted)\n\n\n# # 64\n\n# In[159]:\n\n\nprint('RF result(micro) + PCA')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_pca_predicted ))\nprint('F1-score = ', f1_score(y_test,rf_pca_predicted , average='micro'))\nprint('Precision = ', precision_score(y_test,rf_pca_predicted , 
average='micro'))\nprint('Recall = ', recall_score(y_test,rf_pca_predicted , average='micro'))\n\n\n# In[160]:\n\n\nprint('RF result(micro) + PCA')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_pca_predicted ))\nprint('F1-score = ', f1_score(y_test,rf_pca_predicted , average='macro'))\nprint('Precision = ', precision_score(y_test,rf_pca_predicted , average='macro'))\nprint('Recall = ', recall_score(y_test,rf_pca_predicted , average='macro'))\n\n\n# # 65 RF + PCA + HP\n\n# In[161]:\n\n\nparam_grid = {'max_depth':[4,8,16,None],\n 'n_estimators':[50,100,200,500],\n 'min_samples_split':[3,5,6,7]}\n\n\n# In[162]:\n\n\ngrid_search = GridSearchCV(RandomForestClassifier(), param_grid, verbose=3)\n\n\n# In[163]:\n\n\ngrid_search.fit(X_train_pca, y_train)\n\n\n# In[164]:\n\n\ngrid_search.best_params_\n\n\n# In[165]:\n\n\ngrid_search.best_estimator_\n\n\n# In[166]:\n\n\nrf_pca_grid_predicted = grid_search.predict(X_test_pca)\n\n\n# # 66\n\n# In[168]:\n\n\nconfusion_matrix(y_test,rf_pca_grid_predicted)\n\n\n# # 67\n\n# In[170]:\n\n\nprint('RF result(micro) + PCA + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_pca_grid_predicted))\nprint('F1-score = ', f1_score(y_test,rf_pca_grid_predicted, average='micro'))\nprint('Precision = ', precision_score(y_test,rf_pca_grid_predicted, average='micro'))\nprint('Recall = ', recall_score(y_test,rf_pca_grid_predicted, average='micro'))\n\n\n# In[171]:\n\n\nprint('RF result(micro) + PCA + Grid')\n\nprint('Accuracy = ', accuracy_score(y_test,rf_pca_grid_predicted))\nprint('F1-score = ', f1_score(y_test,rf_pca_grid_predicted, average='macro'))\nprint('Precision = ', precision_score(y_test,rf_pca_grid_predicted, average='macro'))\nprint('Recall = ', recall_score(y_test, rf_pca_grid_predicted, average='macro'))\n\n\n# # 68 Before HT and Macro\n\n# In[182]:\n\n\ncolumns = ['method','score','score_type']\nvalues = [['SVM',0.9861,'Accuracy'],\n ['SVM',0.9869,'F1 Score'],\n ['SVM',0.9876,'Precision'],\n ['SVM',0.9866,'Recall'],\n ['RF',0.9722,'Accuracy'],\n ['RF',0.9723,'F1 Score'],\n ['RF',0.9720,'Precision'],\n ['RF',0.9743,'Recall'],\n ['SVM+LDA',0.9583,'Accuracy'],\n ['SVM+LDA',0.9579,'F1 Score'],\n ['SVM+LDA',0.9581,'Precision'],\n ['SVM+LDA',0.9615,'Recall'],\n ['RF+LDA',0.9722,'Accuracy'],\n ['RF+LDA',0.9723,'F1 Score'],\n ['RF+LDA',0.9720,'Precision'],\n ['RF+LDA',0.9743,'Recall'],\n ['SVM+PCA',0.9583,'Accuracy'],\n ['SVM+PCA',0.9573,'F1 Score'],\n ['SVM+PCA',0.9583,'Precision'],\n ['SVM+PCA',0.9615,'Recall'],\n ['RF+PCA',0.8888,'Accuracy'],\n ['RF+PCA',0.8893,'F1 Score'],\n ['RF+PCA',0.8907,'Precision'],\n ['RF+PCA',0.8903,'Recall']]\n\n\n# In[183]:\n\n\ndf_results_before_HT = pd.DataFrame(values, columns=columns)\n\ndf_results_before_HT\n\n\n# In[184]:\n\n\nfig = plt.figure(figsize=(8,6))\nsns.barplot(x='method', y='score', hue='score_type', data=df_results_before_HT)\n\n\n# # 69 After HT and Micro\n\n# In[185]:\n\n\ncolumns = ['method','score','score_type']\nvalues = [['SVM',0.9583,'Accuracy'],\n ['SVM',0.9583,'F1 Score'],\n ['SVM',0.9583,'Precision'],\n ['SVM',0.9583,'Recall'],\n ['RF',0.9861,'Accuracy'],\n ['RF',0.9861,'F1 Score'],\n ['RF',0.9861,'Precision'],\n ['RF',0.9861,'Recall'],\n ['SVM+LDA',0.9583,'Accuracy'],\n ['SVM+LDA',0.9583,'F1 Score'],\n ['SVM+LDA',0.9583,'Precision'],\n ['SVM+LDA',0.9583,'Recall'],\n ['RF+LDA',0.9583,'Accuracy'],\n ['RF+LDA',0.9583,'F1 Score'],\n ['RF+LDA',0.9583,'Precision'],\n ['RF+LDA',0.9583,'Recall'],\n ['SVM+PCA',0.9583,'Accuracy'],\n ['SVM+PCA',0.9583,'F1 Score'],\n ['SVM+PCA',0.9583,'Precision'],\n 
['SVM+PCA',0.9583,'Recall'],\n ['RF+PCA',0.9583,'Accuracy'],\n ['RF+PCA',0.9583,'F1 Score'],\n ['RF+PCA',0.9583,'Precision'],\n ['RF+PCA',0.9583,'Recall']]\n\n\n# In[186]:\n\n\ndf_results_After_HT = pd.DataFrame(values, columns=columns)\n\ndf_results_After_HT\n\n\n# In[188]:\n\n\nfig = plt.figure(figsize=(8,6))\nsns.barplot(x='method', y='score', hue='score_type', data=df_results_After_HT)\n\n\n# # 70 \n\n# In[189]:\n\n\nimport plotly.express as px\n\n\n# In[190]:\n\n\ncolumns = ['method','score','score_type']\nvalues = [['SVM+LDA',0.9583,'Accuracy'],\n ['RF+LDA',0.9583,'Accuracy'],\n ['SVM+PCA',0.9583,'Accuracy'],\n ['RF+PCA',0.9583,'Accuracy']]\n\n\ndf_results = pd.DataFrame(values, columns=columns)\n\ndf_results\n\n\n# In[191]:\n\n\nfig = px.bar(df_results, x='method', y='score', title='ACC Comparision')\nfig.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"theurk/Datacamp1","sub_path":"28 PROJECT IV - Dimentionality Reduction/Day 28 PROJECT IV - Dimentionality Reduction.py","file_name":"Day 28 PROJECT IV - Dimentionality Reduction.py","file_ext":"py","file_size_in_byte":21539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71269241769","text":"# -*- coding: utf-8 -*-\nimport datetime\n\nfrom fastapi import HTTPException, status , Response, APIRouter, Depends, UploadFile, File\nfrom .. import squema, oauth2 # mi modelo pydentic para validar request\nfrom ..database import db, bucket\nfrom firebase_admin import firestore\n\n\nrouter = APIRouter(\n tags=['Post'])\nprint(\"estamos en router. post\")\n\n@router.post(\"/uploadfile\")\nasync def create_upload_file(file: UploadFile = File(...)):\n # hacemos algo con el archivo recibido...\n myFile = file.file\n myFile.seek(0,2)\n size = myFile.tell()\n myFile.seek(0)\n blob = bucket.blob(\"myFiles/\"+file.filename)\n blob.upload_from_file(myFile)\n url = blob.generate_signed_url(\n version=\"v4\",\n # This URL is valid for 15 minutes\n expiration=datetime.timedelta(minutes=15),\n # Allow GET requests using this URL.\n method=\"GET\")\n print (f'\\nobjeto almacenado con nombre: {blob.name}\\n')\n return {\"filename: \": file.filename,\n \"content_type: \":file.content_type,\n \"size: \":size,\n \"URL: \":url}\n\n\n@router.get(\"/posts\")\nasync def get_posts(get_current_user:str= Depends(oauth2.get_current_user)):\n posts_ref = db.collection('posts')\n docs = posts_ref.order_by(\n 'updated_at',direction=firestore.Query.DESCENDING).stream()\n myData =[]\n for doc in docs:\n post = squema.modelo_respuesta(doc)\n myData.append(post)\n return {\"Cantidad\":len(myData),\n \"data\": myData,\n \"current_user\":get_current_user}\n\n\n@router.post(\"/posts\")\ndef create_psot(post: squema.create_Post, get_current_user:str= Depends(oauth2.get_current_user)):\n postByUser = post.dict()\n postByUser ['user'] = get_current_user\n ref_postAgregado =db.collection(\"posts\").document()\n ref_postAgregado.set(postByUser)\n return {\"data\" : ref_postAgregado.get().to_dict(),\n \"ID\":ref_postAgregado.id\n }\n\n@router.get(\"/posts/{id}\")\ndef get_post(id:str):\n ref_post = db.collection('posts').document(id)\n if not ref_post.get().exists:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f'post id: {id} was not found')\n return {\"data\":ref_post.get().to_dict()}\n\n@router.delete(\"/posts/{id}\", status_code=status.HTTP_204_NO_CONTENT)\ndef delete_post (id:str):\n ref_post = db.collection('posts').document(id)\n if not ref_post.get().exists:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f'post id: {id} was not found')\n ref_post.delete()\n return Response(status_code=status.HTTP_204_NO_CONTENT)\n\n@router.put(\"/posts/{id}\")\ndef update_post (id: str, post:squema.update_Post):\n ref_post = db.collection('posts').document(id)\n if not ref_post.get().exists:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f'post with id : {id} was not found')\n ref_post.update(post.dict())\n return {'message':f'post {id} updated'}","repo_name":"EmanuelCalderoni/fastapi_Server","sub_path":"app/routers/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37394573599","text":"import torch\r\nimport cv2\r\nimport numpy as np\r\nimport threading\r\n\r\n\r\ncap_right = cv2.VideoCapture(0)\r\nmodel = torch.hub.load('ultralytics/yolov5', 'yolov5s') \r\nresults_lock = threading.Lock()\r\nresults=None\r\nstop_flag=False\r\n\r\ndef detect_object():\r\n global results,stop_flag\r\n while True:\r\n ret_right, frame_right = cap_right.read()\r\n\r\n if ret_right==False:\r\n break\r\n else:\r\n _results = model(frame_right)\r\n # for res in results.xyxy[0]:\r\n # print(results.names[int(res[5])])\r\n with results_lock:\r\n results=_results\r\n\r\n cv2.imshow(\"frame-right\",np.squeeze(results.render()))\r\n\r\n if cv2.waitKey(1) & 0xFF == 27:\r\n break\r\n cap_right.release()\r\n cv2.destroyAllWindows()\r\n stop_flag=True\r\n\r\ndef print_results():\r\n global results,stop_flag\r\n while not stop_flag:\r\n with results_lock:\r\n if results is not None:\r\n for res in results.xyxy[0]:\r\n print(results.names[int(res[5])])\r\n\r\nthread1=threading.Thread(target=detect_object)\r\nthread2=threading.Thread(target=print_results)\r\n\r\nthread1.start()\r\nthread2.start()\r\n\r\nthread1.join()\r\nthread2.join()\r\n\r\n","repo_name":"NishaniKasineshan/Image-processing","sub_path":"multithreading_object_detection.py","file_name":"multithreading_object_detection.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70006551849","text":"from ledcontrol import ahrs, wled\nfrom ledcontrol.math import scale\n\nWLED_DEVICE = \"/dev/cu.usbmodem7111101\"\nAHRS_DEVICE = \"/dev/cu.usbserial-0001\"\n\nLED_COUNT = 120\nBRIGHTNESS = 0.1\nSENSITIVITY = 2\n\n\ndef main():\n ahrs_interface = ahrs.SerialAHRS(AHRS_DEVICE)\n wled_interface = wled.SerialWLED(WLED_DEVICE)\n\n while True:\n attitude_sample, imu_sample, timing_sample = ahrs_interface.read()\n scaled_roll = scale(attitude_sample.roll, -90, 90, 0, LED_COUNT * SENSITIVITY)\n\n # Rainbow mode\n # colours = [(i / (float(LED_COUNT)), 1, 1) for i in range(LED_COUNT)]\n # leds = [colours[int(i + scaled_roll) % 120] for i in range(LED_COUNT)]\n\n # Cursor mode\n leds = [(1, 1, 1) for i in range(LED_COUNT)]\n leds[int(scaled_roll % 120)] = (0.4, 1, 1)\n leds[int((scaled_roll - 1) % 120)] = (0.55, 1, 1)\n leds[int((scaled_roll + 1) % 120)] = (0.55, 1, 1)\n\n wled_interface.flush_hsv(leds, brightness=BRIGHTNESS)\n\n if ahrs_interface.counter.ready():\n print(f\"AHRS: {ahrs_interface.counter.sample()} Hz\")\n\n if wled_interface.counter.ready():\n print(f\"WLED: {wled_interface.counter.sample()} Hz\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pierrekin/ledcontrol","sub_path":"scripts/ahrs_example.py","file_name":"ahrs_example.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2726391595","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis enables to parameterize unit tests - the tests are run by GitHub Actions each time you commit to the github repo\n\"\"\"\n\n#########\n#\n# Help on Tests:\n#\n##########\n\n# Some useful commands:\n#\n# pytest tests.py\n# pytest -k TestDemoClass tests.py\n# pytest -k \"test_ok\" tests.py\n\n# Start the interactive Python debugger on errors or KeyboardInterrupt.\n# pytest tests.py --pdb\n\n# --collect-only, --co only collect tests, don't execute them.\n# pytest tests.py --co\n\n# -v run the tests in verbose mode, outputting one line per test\n# pytest -v tests.py\n\n# A \"test_\" prefix in classes and contributivity_methods is needed to make a test discoverable by pytest\n\n# Main documentation:\n# https://docs.pytest.org/en/latest/contents.html\n\n# Gettig Started\n# https://docs.pytest.org/en/latest/getting-started.html#group-multiple-tests-in-a-class\n\n# Parametrize to generate parameters combinations\n# https://docs.pytest.org/en/latest/example/parametrize.html#paramexamples\n\n# Fixture to initialize test functions\n# https://docs.pytest.org/en/latest/fixture.html\n\n# Test architecture\n# https://docs.pytest.org/en/latest/goodpractices.html#test-discovery\n\nimport numpy as np\nimport pytest\nfrom ruamel.yaml import YAML\n\nfrom mplc import utils\nfrom mplc.contributivity import Contributivity\nfrom mplc.corruption import Permutation, PermutationCircular, Randomize, Redundancy, RandomizeUniform, Duplication\nfrom mplc.dataset import Mnist, Cifar10, Titanic, Imdb, Esc50\nfrom mplc.experiment import Experiment\nfrom mplc.multi_partner_learning.basic_mpl import FederatedAverageLearning\nfrom mplc.multi_partner_learning.utils import UniformAggregator\nfrom mplc.partner import Partner\nfrom mplc.scenario import Scenario\n# create_Mpl uses create_Dataset and create_Contributivity uses create_Scenario\nfrom mplc.splitter import FlexibleSplitter, AdvancedSplitter, RandomSplitter, StratifiedSplitter\n\n\n######\n#\n# Fixture definitions\n#\n######\n\n\n@pytest.fixture(scope=\"class\", params=(Mnist, Cifar10, Titanic, Imdb, Esc50))\ndef create_all_datasets(request):\n return request.param()\n\n\n@pytest.fixture(scope=\"class\")\ndef create_MultiPartnerLearning(create_all_datasets):\n data = create_all_datasets\n # Create partners_list (this is not a fixture):\n scenario = Scenario(3, [0.3, 0.3, 0.4], dataset=data)\n mpl = FederatedAverageLearning(\n scenario,\n epoch_count=2,\n minibatch_count=2,\n dataset=data,\n aggregation=UniformAggregator,\n is_early_stopping=True,\n is_save_data=False,\n )\n\n yield mpl\n\n\n@pytest.fixture(scope=\"class\", params=(RandomSplitter([0.1, 0.2, 0.3, 0.4]),\n StratifiedSplitter([0.1, 0.2, 0.3, 0.4]),\n AdvancedSplitter([0.3, 0.5, 0.2],\n [[4, \"specific\"], [6, \"shared\"], [4, \"shared\"]]),\n FlexibleSplitter([1.0, 0.0, 0.0], [\n [0.33, 0.33, 0.33, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],\n [0.33, 0.33, 0.33, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0],\n [0.33, 0.33, 0.33, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n ])))\ndef create_splitter(request):\n return request.param()\n\n\n@pytest.fixture(scope='class')\ndef create_Partner(create_all_datasets):\n data = create_all_datasets\n partner = Partner(0)\n partner.y_train = data.y_train[:int(len(data.y_train) / 10)]\n partner.x_train = data.x_train[:int(len(data.x_train) / 10)]\n return partner\n\n\n@pytest.fixture(scope=\"class\",\n params=((Mnist, \"random\", ['not-corrupted'] * 3),\n (Mnist, \"random\", ['permutation', Redundancy(0.2), 
Duplication(duplicated_partner_id=0)]),\n (Mnist,\n AdvancedSplitter([0.3, 0.5, 0.2], [[4, \"specific\"], [6, \"shared\"], [4, \"shared\"]]),\n ['not-corrupted'] * 3),\n (Cifar10, \"random\", ['not-corrupted'] * 3),\n (Cifar10,\n FlexibleSplitter([0.3, 0.5, 0.2], [[0.33, 0.33, 0.33, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],\n [0.33, 0.33, 0.33, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0],\n [0.33, 0.33, 0.33, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0]]),\n ['not-corrupted'] * 3)),\n ids=['Mnist - basic',\n 'Mnist - basic - corrupted',\n 'Mnist - advanced',\n 'Cifar10 - basic',\n 'Cifar10 - flex'])\ndef create_Scenario(request):\n dataset = request.param[0]()\n samples_split_option = request.param[1]\n corruption = request.param[2]\n params = {\"dataset\": dataset}\n params.update(\n {\n \"partners_count\": 3,\n \"amounts_per_partner\": [0.3, 0.5, 0.2],\n \"samples_split_option\": samples_split_option,\n \"corruption_parameters\": corruption,\n }\n )\n params.update(\n {\n \"contributivity_methods\": [\"Shapley values\", \"Independent scores\"],\n \"multi_partner_learning_approach\": \"fedavg\",\n \"aggregation\": \"uniform\",\n }\n )\n params.update(\n {\n \"gradient_updates_per_pass_count\": 5,\n \"epoch_count\": 2,\n \"minibatch_count\": 2,\n \"is_early_stopping\": True,\n }\n )\n params.update({\"init_model_from\": \"random_initialization\"})\n params.update({\"is_quick_demo\": False})\n\n # scenario_.dataset object is created inside the Scenario constructor\n scenario_ = Scenario(\n **params, scenario_id=0\n )\n\n scenario_.mpl = scenario_._multi_partner_learning_approach(scenario_, is_save_data=True)\n\n return scenario_\n\n\n@pytest.fixture(scope='class')\ndef create_experiment():\n return Experiment(experiment_name='test_exp', nb_repeats=10, is_save=True)\n\n\n@pytest.fixture(scope=\"class\")\ndef create_Contributivity(create_Scenario):\n scenario = create_Scenario\n contributivity = Contributivity(scenario=scenario)\n\n return contributivity\n\n\n######\n#\n# Tests modules with Objects\n#\n######\n\nclass Test_Experiment:\n def test_add_scenario(self, create_experiment):\n exp = create_experiment\n sc = Scenario(2, [0.5, 0.5], dataset='titanic')\n assert len(exp.scenarios_list) == 0, 'Scenario list should be empty when initialized'\n exp.add_scenario(sc)\n assert exp.scenarios_list[0] is sc, 'Failed to add a scenario'\n\n def test_raise_error_when_adding_a_string(self, create_experiment):\n exp = create_experiment\n with pytest.raises(Exception):\n exp.scenarios_list[0] = 'not a scenario'\n with pytest.raises(Exception):\n exp.scenarios_list.append('Still not a scenario')\n with pytest.raises(Exception):\n exp.add_scenario('for the last time, a string is NOT a scenario')\n\n def test_def_path(self, create_experiment):\n exp = create_experiment\n exp.name = 'new_name'\n path = exp.define_experiment_path()\n assert path != exp.experiment_path, 'the path should have changed'\n assert 'new_name' in str(path), 'the new name should be in path'\n assert path.exists(), f'{path} should exist'\n\n\nclass Test_Scenario:\n def test_scenar(self, create_Scenario):\n assert type(create_Scenario) == Scenario\n\n def test_raiseException(self, create_Scenario):\n scenario = create_Scenario\n with pytest.raises(Exception):\n scenario.instantiate_scenario_partners()\n\n\nclass Test_Mpl:\n def test_Mpl(self, create_MultiPartnerLearning):\n mpl = create_MultiPartnerLearning\n assert type(mpl) == FederatedAverageLearning\n\n\nclass Test_Contributivity:\n def test_Contributivity(self, create_Contributivity):\n contri = 
create_Contributivity\n assert type(contri) == Contributivity\n\n\n######\n#\n# Test supported datasets\n#\n######\n\nclass Test_Dataset:\n\n def test_train_split_global(self, create_all_datasets):\n \"\"\"train_val_split is used once, just after Dataset being instantiated\n - this is written to prevent its call from another place\"\"\"\n data = create_all_datasets\n assert len(data.x_val) < len(data.x_train)\n assert len(data.x_test) < len(data.x_train)\n with pytest.raises(Exception):\n data.train_val_split_global()\n\n def test_data_shape(self, create_all_datasets):\n data = create_all_datasets\n assert len(data.x_train) == len(data.y_train), \"Number of train label is not equal to the number of data\"\n assert len(data.x_val) == len(data.y_val), \"Number of val label is not equal to the number of data\"\n assert len(data.x_test) == len(data.y_test), \"Number of test label is not equal to the number of data\"\n\n if data.num_classes > 2:\n assert data.y_train[0].shape == (data.num_classes,)\n assert data.y_val[0].shape == (data.num_classes,)\n assert data.y_test[0].shape == (data.num_classes,)\n assert data.x_train[0].shape == data.input_shape\n assert data.x_test[0].shape == data.input_shape\n assert data.x_val[0].shape == data.input_shape\n\n def test_generate_new_model(self, create_all_datasets):\n dataset = create_all_datasets\n model = dataset.generate_new_model()\n assert callable(model.fit), \".fit() method is required for model\"\n assert callable(model.evaluate), \".evaluate() method is required for model\"\n assert callable(model.save_weights), \".save_weights() method is required for model\"\n assert callable(model.load_weights), \".load_weights() method is required for model\"\n assert callable(model.get_weights), ' .get_weights() method is required for model'\n assert callable(model.set_weights), \".set_weights() method is required for model\"\n\n\n######\n#\n# Test supported Splitter\n#\n######\n\nclass Test_Splitter:\n def test_random_splitter_global(self, create_all_datasets):\n splitter = RandomSplitter([0.1, 0.2, 0.3, 0.4])\n dataset = create_all_datasets\n partners_list = [Partner(i) for i in range(len(splitter.amounts_per_partner))]\n splitter.split(partners_list, dataset)\n for p in partners_list:\n assert len(p.y_val) == 0, \"validation set is not empty in spite of the val_set == 'global'\"\n assert len(p.y_test) == 0, \"test set is not empty in spite of the val_set == 'global'\"\n assert len(p.x_train) == len(p.y_train), 'labels and samples numbers mismatches'\n assert (p.final_nb_samples / len(dataset.y_train) - splitter.amounts_per_partner[p.id]) \\\n < (1 / len(dataset.y_train)), \"Amounts of data not respected\"\n\n def test_stratified_splitter_global(self, create_all_datasets):\n splitter = StratifiedSplitter([0.1, 0.2, 0.3, 0.4])\n dataset = create_all_datasets\n partners_list = [Partner(i) for i in range(len(splitter.amounts_per_partner))]\n splitter.split(partners_list, dataset)\n for p in partners_list:\n assert len(p.y_val) == 0, \"validation set is not empty in spite of the val_set == 'global'\"\n assert len(p.y_test) == 0, \"test set is not empty in spite of the val_set == 'global'\"\n assert len(p.x_train) == len(p.y_train), 'labels and samples numbers mismatches'\n if dataset.num_classes >= 3:\n assert len(p.labels) < dataset.num_classes, f'Partner {p.id} has all labels.'\n assert (p.final_nb_samples / len(dataset.y_train) - splitter.amounts_per_partner[p.id]) \\\n < (1 / len(dataset.y_train)), \"Amounts of data not respected\"\n\n def 
test_random_splitter_local(self, create_all_datasets):\n splitter = RandomSplitter([0.1, 0.2, 0.3, 0.4], val_set='local', test_set='local')\n dataset = create_all_datasets\n partners_list = [Partner(i) for i in range(len(splitter.amounts_per_partner))]\n splitter.split(partners_list, dataset)\n for p in partners_list:\n assert len(p.y_val) > 0, \"validation set is empty in spite of the val_set == 'local'\"\n assert len(p.y_test) > 0, \"test set is empty in spite of the val_set == 'local'\"\n assert len(p.x_train) == len(p.y_train), 'labels and samples numbers mismatches'\n assert (p.final_nb_samples / len(dataset.y_train) - splitter.amounts_per_partner[p.id]) \\\n < (1 / len(dataset.y_train)), \"Amounts of data not respected\"\n\n def test_stratified_splitter_local(self, create_all_datasets):\n splitter = StratifiedSplitter([0.1, 0.2, 0.3, 0.4], val_set='local', test_set='local')\n dataset = create_all_datasets\n partners_list = [Partner(i) for i in range(len(splitter.amounts_per_partner))]\n splitter.split(partners_list, dataset)\n for p in partners_list:\n assert len(p.y_val) > 0, \"validation set is empty in spite of the val_set == 'local'\"\n assert len(p.y_test) > 0, \"test set is empty in spite of the val_set == 'local'\"\n assert len(p.x_train) == len(p.y_train), 'labels and samples numbers mismatches'\n if dataset.num_classes >= 3:\n assert len(p.labels) < dataset.num_classes, f'Partner {p.id} has all labels.'\n assert (p.final_nb_samples / len(dataset.y_train) - splitter.amounts_per_partner[p.id]) \\\n < (1 / len(dataset.y_train)), \"Amounts of data not respected\"\n\n def test_advanced_splitter_global(self, create_all_datasets):\n dataset = create_all_datasets\n splitter = AdvancedSplitter([0.3, 0.3, 0.4], configuration=[[4 * (dataset.num_classes // 10), \"specific\"],\n [6 * (dataset.num_classes // 10), \"shared\"],\n [4 * (dataset.num_classes // 10), \"shared\"]])\n partners_list = [Partner(i) for i in range(len(splitter.amounts_per_partner))]\n if dataset.num_classes >= 10:\n splitter.split(partners_list, dataset)\n for p in partners_list:\n assert len(p.y_val) == 0, \"validation set is not empty in spite of the val_set == 'global'\"\n assert len(p.y_test) == 0, \"test set is not empty in spite of the val_set == 'global'\"\n assert len(p.x_train) == len(p.y_train), 'labels and samples numbers mismatches'\n if dataset.num_classes >= 3:\n assert len(p.labels) < dataset.num_classes, f'Partner {p.id} has all labels.'\n else:\n with pytest.raises(Exception):\n splitter.split(partners_list, dataset)\n\n def test_advanced_splitter_local(self, create_all_datasets):\n dataset = create_all_datasets\n splitter = AdvancedSplitter([0.3, 0.3, 0.4], configuration=[[4 * (dataset.num_classes // 10), \"specific\"],\n [6 * (dataset.num_classes // 10), \"shared\"],\n [4 * (dataset.num_classes // 10), \"shared\"]],\n val_set='local', test_set='local')\n partners_list = [Partner(i) for i in range(len(splitter.amounts_per_partner))]\n if dataset.num_classes >= 10:\n splitter.split(partners_list, dataset)\n for p in partners_list:\n assert len(p.y_val) > 0, \"validation set is empty in spite of the val_set == 'local'\"\n assert len(p.y_test) > 0, \"test set is empty in spite of the val_set == 'local'\"\n assert len(p.x_train) == len(p.y_train), 'labels and samples numbers mismatches'\n if dataset.num_classes >= 3:\n assert len(p.labels) < dataset.num_classes, f'Partner {p.id} has all labels.'\n else:\n with pytest.raises(Exception):\n splitter.split(partners_list, dataset)\n\n def 
test_flexible_splitter_global(self, create_all_datasets):\n dataset = create_all_datasets\n splitter = FlexibleSplitter([0.3, 0.3, 0.4], configuration=[\n [0.33, 0.33, 0.33, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],\n [0.33, 0.33, 0.33, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0],\n [0.33, 0.33, 0.33, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0]])\n partners_list = [Partner(i) for i in range(len(splitter.amounts_per_partner))]\n if dataset.num_classes == 10:\n splitter.split(partners_list, dataset)\n for p in partners_list:\n assert len(p.y_val) == 0, \"validation set is not empty in spite of the val_set == 'global'\"\n assert len(p.y_test) == 0, \"test set is not empty in spite of the val_set == 'global'\"\n assert len(p.x_train) == len(p.y_train), 'labels and samples numbers mismatches'\n assert len(p.labels) < dataset.num_classes, f'Partner {p.id} has all labels.'\n\n def test_flexible_splitter_local(self, create_all_datasets):\n dataset = create_all_datasets\n splitter = FlexibleSplitter([0.3, 0.3, 0.4], configuration=[\n [0.33, 0.33, 0.33, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],\n [0.33, 0.33, 0.33, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0],\n [0.33, 0.33, 0.33, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0]],\n val_set='local', test_set='local')\n partners_list = [Partner(i) for i in range(len(splitter.amounts_per_partner))]\n if dataset.num_classes == 10:\n splitter.split(partners_list, dataset)\n for p in partners_list:\n assert len(p.y_val) > 0, \"validation set is empty in spite of the val_set == 'local'\"\n assert len(p.y_test) > 0, \"test set is empty in spite of the val_set == 'local'\"\n assert len(p.x_train) == len(p.y_train), 'labels and samples numbers mismatches'\n assert len(p.labels) < dataset.num_classes, f'Partner {p.id} has all labels.'\n\n\n######\n#\n# Test supported corruption\n#\n######\n\nclass Test_Corruption:\n def test_permutation_circular(self, create_Partner):\n partner = create_Partner\n partner.corruption = PermutationCircular(partner=partner)\n partner.corrupt()\n assert ((partner.y_train == 0) + (partner.y_train == 1)).all()\n if partner.y_train.ndim > 1:\n assert partner.y_train[-1].max() == 1\n assert partner.y_train[-1].sum() == 1\n\n def test_permutation(self, create_Partner):\n partner = create_Partner\n partner.corruption = Permutation(partner=partner)\n partner.corrupt()\n assert ((partner.y_train == 0) + (partner.y_train == 1)).all()\n ones_vect = np.ones(partner.corruption.matrix.shape[1])\n assert (partner.corruption.matrix.sum(axis=1) == ones_vect).all()\n assert (partner.corruption.matrix.sum(axis=0) == ones_vect.T).all()\n\n def test_random(self, create_Partner):\n partner = create_Partner\n partner.corruption = Randomize(partner=partner)\n partner.corrupt()\n assert ((partner.y_train == 0) + (partner.y_train == 1)).all()\n if partner.y_train.ndim > 1:\n assert partner.y_train[-1].max() == 1\n assert partner.y_train[-1].sum() == 1\n ones_vect = np.ones(partner.corruption.matrix.shape[1])\n assert (partner.corruption.matrix.sum(axis=1).round(1) == ones_vect).all()\n\n def test_random_uniform(self, create_Partner):\n partner = create_Partner\n partner.corruption = RandomizeUniform(partner=partner)\n partner.corrupt()\n assert ((partner.y_train == 0) + (partner.y_train == 1)).all()\n if partner.y_train.ndim > 1:\n assert partner.y_train[-1].max() == 1\n assert partner.y_train[-1].sum() == 1\n assert (partner.corruption.matrix == partner.corruption.matrix[0][0]).all(), 'Distribution isn\\'t uniform'\n\n def test_redundancy(self, create_Partner):\n partner = create_Partner\n 
partner.corruption = Redundancy(partner=partner)\n partner.corrupt()\n assert (partner.y_train == partner.y_train[0]).all()\n assert (partner.x_train == partner.x_train[0]).all()\n\n\n#####\n#\n# Test Demo and config files\n#\n######\n\n\nclass _TestDemoClass:\n def test_ok(self):\n \"\"\"\n Demo test\n \"\"\"\n ok = \"ok\"\n assert \"ok\" in ok\n\n def test_ko(self):\n \"\"\"\n Demo test 2\n \"\"\"\n ko = \"ko\"\n assert \"ok\" not in ko\n\n def test_load_cfg(self):\n \"\"\"\n Check if the two config files are present\n and loaded with the load_cfg method\n \"\"\"\n config_file = utils.load_cfg(\"config.yml\")\n config_quick_debug_file = utils.load_cfg(\"config_quick_debug.yml\")\n assert config_file and config_quick_debug_file\n\n def test_load_config_files(self):\n \"\"\"\n Check if the two config files are present\n and loaded with the load method\n \"\"\"\n yaml = YAML(typ='safe')\n with open(\"config.yml\", \"r\") as config_file:\n assert yaml.load(config_file)\n with open(\"config_quick_debug.yml\", \"r\") as config_quick_debug_file:\n assert yaml.load(config_quick_debug_file)\n","repo_name":"LabeliaLabs/distributed-learning-contributivity","sub_path":"tests/unit_tests.py","file_name":"unit_tests.py","file_ext":"py","file_size_in_byte":21821,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"53"}
+{"seq_id":"16373958653","text":"import logging\nimport torch\nimport torch.nn as nn\n\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\nfrom typing import List\n\nfrom framenet_tools.config import ConfigManager\nfrom framenet_tools.utils.static_utils import shuffle_concurrent_lists\n\n\nclass Net(nn.Module):\n def __init__(\n self,\n embedding_size: int,\n frame_embedding_size: int,\n hidden_sizes: list,\n layers: list,\n num_classes: int,\n device: torch.device,\n embedding_layer: torch.nn.Embedding,\n ):\n super(Net, self).__init__()\n\n self.device = device\n\n self.embedding_layer = embedding_layer\n\n self.input_size = 401\n self.hidden_size = 450\n self.hidden_size2 = 200\n\n # Dynamic instantiation of the activation function\n # act_func = getattr(nn, span_layers[i])().to(self.device)\n\n logging.debug(f\"Hidden sizes: {hidden_sizes}\")\n logging.debug(f\"Activation functions: {layers}\")\n\n self.hidden_layers = []\n last_size = embedding_size + frame_embedding_size + 4\n\n for i in range(len(hidden_sizes)):\n\n if layers[i].lower() == \"dropout\":\n # Add dropout\n self.add_module(str(i), nn.Dropout(hidden_sizes[i]))\n self.hidden_layers.append(getattr(self, str(i)))\n\n continue\n\n hidden_sizes[i] = int(hidden_sizes[i])\n\n self.add_module(\n str(i),\n getattr(nn, layers[i])(last_size, hidden_sizes[i], bidirectional=True).to(\n self.device\n ),\n )\n\n # Saving function ref\n self.hidden_layers.append(getattr(self, str(i)))\n\n # Double due to the bidirectional processing\n last_size = hidden_sizes[i] * 2\n\n # Last layer\n self.hidden_to_tag = nn.Linear(last_size, num_classes)\n\n def forward(self, x):\n\n sent_len = len(x)\n\n x = torch.tensor(x).to(self.device)\n\n embedded = self.embedding_layer(x[:, :1].type(torch.long))\n\n x = torch.cat((embedded.view(len(embedded), -1), x[:, 1:]), 1)\n\n x = x.view(sent_len, 1, -1)\n\n x = Variable(x).to(self.device)\n\n # As every sequence is porcessed at once, only the outputs are required\n for hidden_layer in self.hidden_layers:\n if isinstance(hidden_layer, nn.Dropout):\n x = hidden_layer(x)\n continue\n\n x, _ = hidden_layer(x)\n\n outputs = []\n\n for i in x:\n outputs.append(self.hidden_to_tag(i))\n\n outputs = torch.stack(outputs, 1).squeeze(2)\n\n return outputs\n\n\nclass SpanIdNetwork(object):\n def __init__(\n self, cM: ConfigManager, num_classes: int, embedding_layer: torch.nn.Embedding,\n ):\n\n self.cM = cM\n self.best_acc = 0\n\n # Check for CUDA\n use_cuda = self.cM.use_cuda and torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n logging.debug(f\"Device used: {self.device}\")\n\n self.num_classes = num_classes\n\n self.net = Net(\n self.cM.embedding_size,\n 100,\n self.cM.span_hidden_sizes,\n self.cM.span_layers,\n num_classes,\n self.device,\n embedding_layer,\n )\n\n self.net.to(self.device)\n\n # Loss and Optimizer\n self.criterion = nn.CrossEntropyLoss()\n self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.cM.span_learning_rate)\n\n def predict(self, sent: List[int]):\n \"\"\"\n Predicts the BIO-Tags of a given sentence.\n\n :param sent: The sentence to predict (already converted by the vocab)\n :return: A list of possibilities for each word for each tag\n \"\"\"\n\n self.reset_hidden()\n\n outputs = self.net(sent)\n\n return outputs.to(\"cpu\")\n\n def reset_hidden(self):\n \"\"\"\n Resets the hidden states of the LSTM.\n\n :return:\n \"\"\"\n\n # NOT needed anymore\n # self.net.hidden = self.net.init_hidden()\n # self.net.hidden2 = 
self.net.init_hidden2()\n\n def train_model(\n self,\n xs: List[torch.tensor],\n ys: List[List[int]],\n dev_xs: List[torch.tensor] = None,\n dev_ys: List[List[int]] = None,\n ):\n \"\"\"\n Trains the model with the given dataset\n Uses the model specified in net\n\n :param xs: The training sequences, given as a list of tensors\n :param ys: The labels of the sequences\n :param dev_xs: The development sequences, given as a list of tensors\n :param dev_ys: The labels of the sequences\n :return:\n \"\"\"\n\n dataset_size = len(xs)\n\n for epoch in range(self.cM.span_num_epochs):\n\n total_loss = 0\n total_hits = 0\n perf_match = 0\n count = 0\n occ = 0\n\n shuffle_concurrent_lists([xs, ys])\n\n with tqdm(\n zip(xs, ys),\n position=0,\n desc=f\"[Epoch: {epoch+1}/{self.cM.span_num_epochs}] Iteration\",\n ) as progress_bar:\n\n for x, y in progress_bar:\n\n output_dim = len(x)\n\n labels = Variable(torch.tensor(y)).to(self.device)\n labels = torch.reshape(labels, (1, output_dim))\n\n self.reset_hidden()\n\n # Forward + Backward + Optimize\n self.optimizer.zero_grad() # zero the gradient buffer\n outputs = self.net(x)\n\n outputs = torch.reshape(outputs, (1, 3, output_dim))\n\n loss = self.criterion(outputs, labels)\n loss.backward()\n self.optimizer.step()\n total_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n\n su = sum(predicted[0])\n\n occ += su\n total_hits += (predicted == labels).sum().item() / len(predicted[0])\n if (predicted == labels).sum().item() == len(predicted[0]):\n perf_match += 1\n\n count += 1\n\n # Just update every 20 iterations\n if count % 20 == 0:\n train_loss = round((total_loss / count), 4)\n train_acc = round((total_hits / count), 4)\n perf_acc = round((perf_match / count), 4)\n progress_bar.set_postfix(\n Loss=train_loss,\n Acc=train_acc,\n Perfect=perf_acc,\n Frames=f\"{count}/{dataset_size}\",\n OccSpans=occ,\n )\n\n self.eval_dev(dev_xs, dev_ys)\n\n def eval_dev(self, xs: List[torch.tensor] = None, ys: List[List[int]] = None):\n \"\"\"\n Evaluates the model directly on the a prepared dataset\n\n :param xs: The development sequences, given as a list of tensors\n :param ys: The labels of the sequence\n :return:\n \"\"\"\n\n hits = 0\n span_hits = 0\n total = 0\n\n for x, y in zip(xs, ys):\n bio_tags = self.predict(x)[0]\n\n bio_tags = torch.argmax(bio_tags, 1)\n\n for gold, pred in zip(y, bio_tags):\n if gold == pred:\n hits += 1\n\n total += len(y)\n\n acc = round((hits / total), 4)\n\n print(f\"DEV-Acc: {acc} Span-acc: {round((span_hits/total),4)}\")\n\n if acc > self.best_acc:\n self.best_acc = acc\n self.save_model(\"data/models/span_test.m\")\n\n def save_model(self, path: str):\n \"\"\"\n Saves the current model at the given path\n\n :param path: The path to save the model at\n :return:\n \"\"\"\n\n torch.save(self.net.state_dict(), path)\n\n def load_model(self, path: str):\n \"\"\"\n Loads the model from a given path\n\n :param path: The path from where to load the model\n :return:\n \"\"\"\n\n self.net.load_state_dict(torch.load(path))\n","repo_name":"inception-project/framenet-tools","sub_path":"framenet_tools/span_identification/spanidnetwork.py","file_name":"spanidnetwork.py","file_ext":"py","file_size_in_byte":8233,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
+{"seq_id":"14754223673","text":"'''\n [문제]\n memberList는 회원목록데이터이다.\n number 는 회원 번호이다. \n id 는 회원아이디이다.\n\n itemList은 쇼핑몰 판매상품데이터이다.\n itemName 는 상품이름이다.\n price는 아이템 가격이다.\n \n orderList는 오늘 주문 목록이다. \n orderid 는 주문한 회원 id 이다.\n itemname 는 주문한 상품이름이다. \n count 는 주문한 상품개수이다. \n\n 각 회원별 주문 총액을 구하시오.\n [정답]\n {'id': 'qwer1234', 'total': 4400}\n {'id': 'pythongood', 'total': 28000}\n {'id': 'testid', 'total': 16000}\n'''\n\nmemberList = [\n {\"number\" : 1001 , \"id\" : \"qwer1234\" },\n {\"number\" : 1002 , \"id\" : \"pythongood\"},\n {\"number\" : 1003 , \"id\" : \"testid\"},\n]\nitemList = [\n {\"itemname\" : \"사과\" , \"price\" : 1100},\n {\"itemname\" : \"바나나\" , \"price\" : 2000},\n {\"itemname\" : \"딸기\" , \"price\" : 4300},\n]\norderList = [\n {\"orderid\" : \"qwer1234\" , \"itemname\" : \"사과\" , \"count\" : 3},\n {\"orderid\" : \"pythongood\" , \"itemname\" : \"딸기\" , \"count\" : 6},\n {\"orderid\" : \"testid\" , \"itemname\" : \"바나나\" , \"count\" : 1},\n {\"orderid\" : \"pythongood\" , \"itemname\" : \"사과\" , \"count\" : 2},\n {\"orderid\" : \"testid\" , \"itemname\" : \"바나나\" , \"count\" : 7},\n {\"orderid\" : \"qwer1234\" , \"itemname\" : \"사과\" , \"count\" : 1}, \n]\n\nresult = []\nfor i in range(len(memberList)):\n total = 0\n for j in range(len(orderList)):\n # 멤버아이디 별로 주문건 확인\n if memberList[i][\"id\"] == orderList[j][\"orderid\"] :\n item = orderList[j][\"itemname\"]\n itemCount = orderList[j][\"count\"]\n # 주문상품 토탈 가격 합산\n for p in range(len(itemList)):\n if item == itemList[p][\"itemname\"]:\n total += itemCount * itemList[p][\"price\"]\n result.append({})\n result[i][\"id\"] = memberList[i][\"id\"]\n result[i][\"total\"] = total\n\nfor i in range(len(result)):\n print(result[i])\n \n ","repo_name":"jomira0220/study","sub_path":"jomira/00_문법총정리/Python_문제풀기/문자열/문자열4/문자열4_문제02_상품매출.py","file_name":"문자열4_문제02_상품���출.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74139217769","text":"import warnings\nimport unittest\n\nfrom six import assertRegex\n\nfrom helpers import configuration\nfrom helpers.resources import resource\n\nfrom ionosenterprise.client import IPBlock, IonosEnterpriseService\nfrom ionosenterprise.errors import ICError, ICNotFoundError\n\n\nclass TestIPBlock(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n warnings.filterwarnings(\"ignore\", category=ResourceWarning,\n message=\"unclosed.*\")\n cls.resource = resource()\n cls.client = IonosEnterpriseService(\n username=configuration.USERNAME,\n password=configuration.PASSWORD,\n headers=configuration.HEADERS)\n\n ipblock1 = IPBlock(**cls.resource['ipblock'])\n ipblock1.size = 2\n cls.ipblock1 = cls.client.reserve_ipblock(ipblock1)\n\n ipblock2 = IPBlock(**cls.resource['ipblock'])\n cls.ipblock2 = cls.client.reserve_ipblock(ipblock2)\n\n @classmethod\n def tearDownClass(cls):\n cls.client.delete_ipblock(cls.ipblock1['id'])\n\n def test_list_ipblocks(self):\n ipblocks = self.client.list_ipblocks()\n\n assertRegex(self, ipblocks['items'][0]['id'], self.resource['uuid_match'])\n self.assertGreater(len(ipblocks['items']), 0)\n self.assertEqual(ipblocks['items'][0]['type'], 'ipblock')\n self.assertGreater(ipblocks['items'][0]['properties']['size'], 0)\n self.assertIn(ipblocks['items'][0]['properties']['location'], self.resource['locations'])\n\n def test_get_ipblock(self):\n ipblock = self.client.get_ipblock(self.ipblock1['id'])\n\n assertRegex(self, ipblock['id'], self.resource['uuid_match'])\n self.assertEqual(ipblock['id'], self.ipblock1['id'])\n self.assertEqual(ipblock['type'], 'ipblock')\n self.assertEqual(ipblock['properties']['name'], (self.resource['ipblock']['name']))\n self.assertEqual(ipblock['properties']['size'], 2)\n self.assertEqual(len(ipblock['properties']['ips']), 2)\n self.assertEqual(ipblock['properties']['location'], self.resource['ipblock']['location'])\n\n def test_delete_ipblock(self):\n ipblock = self.client.delete_ipblock(self.ipblock2['id'])\n\n self.assertTrue(ipblock)\n assertRegex(self, ipblock['requestId'], self.resource['uuid_match'])\n\n def test_reserve_ipblock(self):\n ipblock = self.client.reserve_ipblock(IPBlock(**self.resource['ipblock']))\n\n assertRegex(self, ipblock['id'], self.resource['uuid_match'])\n self.assertEqual(ipblock['properties']['name'], (self.resource['ipblock']['name']))\n self.assertEqual(ipblock['properties']['size'], self.resource['ipblock']['size'])\n self.assertEqual(ipblock['properties']['location'], self.resource['ipblock']['location'])\n\n self.client.delete_ipblock(ipblock['id'])\n\n def test_get_failure(self):\n try:\n self.client.get_ipblock('00000000-0000-0000-0000-000000000000')\n except ICNotFoundError as e:\n self.assertIn(self.resource['not_found_error'], e.content[0]['message'])\n\n def test_reserve_failure(self):\n try:\n ipblock = IPBlock(name=self.resource['ipblock']['name'], size=1)\n self.client.reserve_ipblock(ipblock)\n except ICError as e:\n self.assertIn(self.resource['missing_attribute_error'] % 'location',\n e.content[0]['message'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ionos-enterprise/ionos-enterprise-sdk-python","sub_path":"tests/test_ipblock.py","file_name":"test_ipblock.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"40401038649","text":"import os\nimport ujson\nimport itertools\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport dgl\n\nSUBTOKEN_NUM = 3507\nMAX_SUBTOKEN_LEN = 5\nDROPOUT = 0.1\nFEATURE_DIM = 64\nCLASS_NUM = 65\nBACKWARD = True\nGNN_LAYERS = 2\nTIMESTEPS = [7, 1]\nEDGE_TYPES = [[f'f{i}', f'b{i}', ] for i in range(8)]\nEDGE_TYPES = list(itertools.chain(*EDGE_TYPES))\n\n\ndef load_data(file=os.path.join(os.path.dirname(__file__), 'data_sample.json')):\n with open(file, 'r') as reader:\n data = ujson.load(reader)\n graph_dict = {}\n for idx, et in enumerate(['edges0', 'edges1', 'edges2', 'edges3', 'edges4', 'edges5', 'edges6', 'edges7']):\n src, dst = zip(*data[et])\n src, dst = torch.IntTensor(src), torch.IntTensor(dst)\n graph_dict[('node', f'f{idx}', 'node')] = (src, dst)\n graph_dict[('node', f'b{idx}', 'node')] = (dst, src)\n hetero_graph = dgl.heterograph(graph_dict)\n hetero_graph.ndata['subtokens'] = torch.IntTensor(data['nodes'])\n hetero_graph = hetero_graph\n assert hetero_graph.num_nodes('node') == len(data['nodes'])\n\n dropout = data['dropout']\n batch_size = data['batch_size']\n typed_annotation_node_ids = data['typed_annotation_node_ids']\n typed_annotation_target_class = data['typed_annotation_target_class']\n typed_annotation_pairs_are_equal = data['typed_annotation_pairs_are_equal']\n return hetero_graph, dropout, batch_size, typed_annotation_node_ids, typed_annotation_target_class, typed_annotation_pairs_are_equal\n\n\nclass GatedGNN(nn.Module):\n def __init__(self, edge_types, edge_in, edge_out, hidden_in, hidden_out,\n timestep, backward, dropout, ):\n super(GatedGNN, self).__init__()\n self.edge_types = edge_types\n self.timestep = timestep\n self.backward = backward\n self.dropout = dropout\n self.edge_weights = nn.ModuleDict({\n et: nn.Linear(edge_in, edge_out, bias=False)\n for et in self.edge_types\n })\n self.gru_cell = nn.GRUCell(hidden_in, hidden_out)\n\n def forward(self, graph, prev_key, residual_state=None):\n def edge_func(edges):\n edge_type = edges.canonical_etype[1]\n out = torch.dropout(\n self.edge_weights[edge_type](edges.dst[prev_key]),\n p=self.dropout, train=self.training,\n )\n return {'e': out}\n\n def message_func(edges):\n return {edges.canonical_etype[1]: edges.data['e']}\n\n reduce_funcs = {\n et: dgl.function.max(et, f'_{et}')\n for et in self.edge_types\n }\n\n prev_state = graph.ndata[prev_key]\n with graph.local_scope():\n for step in range(self.timestep):\n for et in self.edge_types:\n # compute edge type info with dst nodes and save them in edge\n graph[et].apply_edges(edge_func, etype=et)\n # aggregate info into nodes with different {edge type} keys\n graph[et].update_all(message_func, reduce_func=reduce_funcs[et], apply_node_func=None, etype=et)\n graph_feature = torch.stack([graph.ndata[f'_{et}'] for et in self.edge_types], dim=1).max(dim=1)[0]\n if residual_state is not None:\n graph_feature = torch.cat([residual_state, graph_feature], dim=-1)\n prev_state = self.gru_cell(graph_feature, prev_state)\n if step < self.timestep - 1:\n prev_state = torch.dropout(prev_state, p=self.dropout, train=self.training)\n return prev_state\n\n\nclass Typilus(nn.Module):\n def __init__(self,\n # node embedding\n vocab_size, embed_size, padding_idx,\n # ggnn\n edge_types, edge_in, edge_out, hidden_size,\n backward, layer_num, timesteps,\n dropout,\n # cls prediction\n cls_num,\n ):\n super().__init__()\n\n self.padding_idx = padding_idx\n self.dropout = dropout\n\n # embedding graphs\n self.node_embedding = 
nn.Embedding(vocab_size, embed_size, padding_idx)\n self.node_layer = nn.Sequential(\n nn.Dropout(self.dropout),\n nn.Linear(embed_size, embed_size, bias=False),\n nn.Dropout(self.dropout),\n )\n # 2 layer GGNN\n self.ggnns = nn.ModuleList([\n GatedGNN(edge_types=edge_types, edge_in=edge_in, edge_out=edge_out,\n hidden_in=(edge_out + hidden_size) if i > 0 else edge_out, hidden_out=hidden_size,\n backward=backward, timestep=timesteps[i], dropout=dropout)\n for i in range(layer_num)\n ])\n # 3 classify layer\n self.cls_layers = nn.Sequential(\n nn.Linear(hidden_size, hidden_size, bias=False),\n nn.Dropout(self.dropout),\n nn.Linear(hidden_size, cls_num),\n )\n\n def forward(self, graph):\n \"\"\"\n nodes: all nodes' subtokens. [N, 5]\n edges: edges between nodes. (dict) {edge type: node_{i} --> node_{j}}, e.g. {0: [E, 2]}\n \"\"\"\n # 1) nodes embedding\n nodes = graph.ndata.pop('subtokens').long()\n node_emb = self.node_embedding(nodes) # [B, L, E]\n node_emb_len = (nodes > self.node_embedding.padding_idx).sum(dim=-1, keepdim=True)\n node_emb = node_emb.sum(dim=1) / node_emb_len # mean(dim=1) => [B, E]\n node_emb = self.node_layer(node_emb) # [B, E]\n graph.ndata[0] = node_emb\n\n # 2) edges embedding\n for idx, ggnn in enumerate(self.ggnns, start=1):\n if idx > 1:\n residual_state = graph.ndata[idx - 2]\n else:\n residual_state = None\n graph.ndata[idx] = ggnn(graph, prev_key=idx - 1, residual_state=residual_state)\n last_state = graph.ndata[len(graph.ndata) - 1]\n return last_state\n\n def triplet_loss(self, repr, equal_ids, margin=2, eplison=1e-10):\n distance = torch.norm(repr.unsqueeze(dim=0) - repr.unsqueeze(dim=1), dim=-1, p=1) # B x B\n max_pos_distance = (distance * equal_ids).max(dim=-1)[0]\n neg_filter = distance <= (max_pos_distance + margin).unsqueeze(dim=-1)\n pos_mask = equal_ids + torch.eye(*equal_ids.size()).type_as(distance)\n neg_filter = neg_filter * (1 - pos_mask)\n avg_neg_distance = (distance * neg_filter).sum(dim=-1) / (neg_filter.sum(dim=-1) + eplison)\n min_neg_distance = (distance + pos_mask * 99999).min(dim=-1)[0]\n pos_filter = (distance >= (min_neg_distance - margin).unsqueeze(dim=-1)).float()\n pos_filter = pos_filter * equal_ids\n avg_pos_distance = (distance * pos_filter).sum(dim=-1) / (pos_filter.sum(dim=-1) + eplison)\n triplet_loss = 0.5 * torch.relu(avg_pos_distance - min_neg_distance + margin) + \\\n 0.5 * torch.relu(max_pos_distance - avg_neg_distance + margin)\n triplet_loss = triplet_loss.mean()\n return triplet_loss\n\n def ce_loss(self, repr, gt):\n repr = self.cls_layers(repr)\n repr = torch.log_softmax(repr, dim=-1)\n bsz = repr.size(0)\n ce_loss = F.cross_entropy(\n repr.view(bsz, -1),\n gt.view(-1),\n ignore_index=self.padding_idx,\n ) / bsz\n return ce_loss\n\n\nif __name__ == '__main__':\n typilus_model = Typilus(vocab_size=SUBTOKEN_NUM, embed_size=FEATURE_DIM, padding_idx=0,\n edge_types=EDGE_TYPES, backward=BACKWARD, layer_num=GNN_LAYERS, timesteps=TIMESTEPS,\n edge_in=FEATURE_DIM, edge_out=FEATURE_DIM, hidden_size=FEATURE_DIM,\n dropout=DROPOUT, cls_num=CLASS_NUM, )\n typilus_model = typilus_model\n\n hetero_graph, dropout, batch_size, typed_annotation_node_ids, typed_annotation_target_class, typed_annotation_pairs_are_equal = load_data()\n last_state = typilus_model(hetero_graph)\n last_state = last_state[typed_annotation_node_ids].contiguous() # B x E\n\n typed_annotation_pairs_are_equal = torch.Tensor(typed_annotation_pairs_are_equal).float().type_as(last_state)\n typed_annotation_target_class = 
torch.Tensor(typed_annotation_target_class).long().to(last_state.device)\n\n triplet_loss = typilus_model.triplet_loss(last_state, equal_ids=typed_annotation_pairs_are_equal)\n print(triplet_loss)\n ce_loss = typilus_model.ce_loss(last_state, gt=typed_annotation_target_class)\n print(ce_loss)\n # loss = triplet_loss + ce_loss\n # loss.backward()\n","repo_name":"CGCL-codes/naturalcc","sub_path":"run/type_prediction/typilus/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8562,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"53"}
+{"seq_id":"5472236106","text":"# /usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@File : common.py\n@Time : 2021/01/14 16:26:21\n@Author: Morker\n@Blog : https://96.mk/\n@Email : i@96.mk\n\nIf you don't go through the cold, you can't get the fragrant plum blossom.\n'''\n\nimport re\nimport random\nimport requests\nfrom config.config import USER_AGENTS\nfrom config.data import Proxys, Urls\n\n\ndef getLatestRevision():\n \"\"\"\n 获取版本信息\n \"\"\"\n headers = {\n \"User-Agent\": random.choice(USER_AGENTS),\n }\n readVersion = None\n try:\n req = requests.get(\n url=\"https://96.mk/Glass/Glass_Version.txt\", headers=headers)\n content = req.text\n readVersion = re.findall(\n \"Version\\s*=\\s*[\\\"'](?P[\\d.]+)\", content)\n except:\n pass\n\n return readVersion[0]\n\n\ndef getScheme():\n if Proxys.proxyList:\n for key in Proxys.proxyList:\n for i in key:\n host = key[i].split('/')[2]\n Proxys.scheme.append(host)\n","repo_name":"s7ckTeam/Glass","sub_path":"lib/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":897,"dataset":"github-code","pt":"53"}
+{"seq_id":"27722404218","text":"#!/usr/bin/python3\n\"\"\"\nWeb Server\n\"\"\"\n\nimport sys\nfrom urllib.parse import urlparse\nfrom urllib.parse import parse_qs\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom socketserver import ThreadingMixIn\nfrom http import HTTPStatus\n\nimport util\nimport pirail_web as application\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\" Threaded HTTP Server \"\"\"\n\nclass MyHandler(BaseHTTPRequestHandler):\n \"\"\" Web Handler \"\"\"\n\n def do_GET(self):\n \"\"\"Respond to a GET request.\"\"\"\n\n url = urlparse(self.path)\n qsdict = parse_qs(url.query)\n\n for match in application.MATCHES:\n groups = match['pattern'].match(self.command + \" \" + url.path)\n if groups is not None:\n try:\n match['handler'](self, groups, qsdict)\n except BrokenPipeError as ex:\n print(\"ERROR: %s\" % ex)\n break\n else:\n self.send_error(HTTPStatus.NOT_FOUND, HTTPStatus.NOT_FOUND.description)\n\nif __name__ == \"__main__\":\n # MAIN START\n\n # Command Line Arguments\n try:\n HOST_NAME = ''\n PORT_NUMBER = int(sys.argv[1])\n except IndexError:\n PORT_NUMBER = 8080\n\n # Web Server\n util.web_server(HOST_NAME, PORT_NUMBER, ThreadedHTTPServer, MyHandler)\n","repo_name":"cpn18/track-chart","sub_path":"webserver/web_server.py","file_name":"web_server.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"32801678465","text":"import math\n\nimport numpy as np\nfrom sklearn.metrics import f1_score\n\nfrom pavel.keras_utils import f1\nfrom pavel.rnn_constants import *\nfrom keras.models import load_model\n\nprint(\"Loading data\")\ndataset = np.load(NUMPY_DATASET)\n\ntest_x = dataset[\"test_x\"]\ntest_y = dataset[\"test_y\"]\n\ntrain_x = dataset[\"train_x\"]\ntrain_y = dataset[\"train_y\"]\n\nprint(\"Loading model\")\nmdl = load_model(SAVED_MODEL, custom_objects={\"f1\": f1})\n\nprint(\"Compiling\")\nmdl.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=[\"accuracy\"])\nmdl.summary()\n\nprint(\"Predicting on train\")\npred_y = mdl.predict(train_x, batch_size=100)\n\npred_y = np.round(pred_y)\n\nprint(\"Total F1 score:\", f1_score(train_y, pred_y, average='micro'))\n\nprint(\"Predicting on test\")\npred_y = mdl.predict(test_x, batch_size=100)\n\npred_y = np.round(pred_y)\n\nprint(\"Total F1 score:\", f1_score(test_y, pred_y, average='micro'))\n","repo_name":"maxim-romanovsky/ds22","sub_path":"pavel/rnn_eval.py","file_name":"rnn_eval.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73218613609","text":"#!/usr/bin/env python3\n\nimport os\nimport re\nimport pandas as pd\nfrom itertools import count\nfrom collections import defaultdict\nfrom time import time as current_time\nfrom robot_strategies.robot_bk_clingo import RobotMIL\nfrom east_west_trains.trains_bk_clingo import TrainsMIL\nfrom string_transformation.strings_bk_clingo import StringsMIL\n\n\nbenchmarks = [\n (\"trains\", TrainsMIL, \"east_west_trains/instances\"),\n (\"strings\", StringsMIL, \"string_transformation/instances_original\"),\n (\"robot\", RobotMIL, \"robot_strategies/instances\"),\n]\ntimes = {\n mode: {b[0]: defaultdict(dict) for b in benchmarks}\n for mode in [\"fc\", \"pfc\", \"ufc\", \"sa\"]\n}\n\n\ndef run_instance(milclass, mode, functional, instance, timeout):\n name = milclass.__name__\n mil = milclass()\n mil.load_examples(instance)\n\n start = current_time()\n for size in count(1):\n for skolems in range(0, size):\n mil.reset_control(size, skolems)\n print(\n f\"\\r{'GROUNDING':11s} {name} with {mode:3s} on {instance}: \"\n + f\"size {size}, skolems {skolems}\",\n end=\"\",\n )\n mil.ground(mode=mode, functional=functional)\n\n print(\n f\"\\r{'SOLVING':11s} {name} with {mode:3s} on {instance}: size \"\n + f\"{size}, skolems {skolems}\",\n end=\"\",\n )\n model = []\n with mil.solve(\n async_=True,\n on_model=lambda m: model.extend(m.symbols(shown=True)),\n ) as handle:\n terminated = False\n while not terminated and current_time() - start < timeout:\n terminated = handle.wait(5)\n if not terminated:\n handle.cancel()\n result = handle.get()\n time = current_time() - start\n\n if result.satisfiable or result.interrupted:\n sat = \"SATISFIABLE\" if result.satisfiable else \"TIMEOUT\"\n print(\n f\"\\r{sat:11s} {name} with {mode:3s} on {instance}: \"\n + f\"size {size}, skolems {skolems} {time:.1f} seconds\"\n )\n return result, model, time\n\n\ndef main():\n data = []\n for name, milclass, instances in benchmarks:\n is_robot = issubclass(milclass, RobotMIL)\n for examples in sorted(os.listdir(instances)):\n for mode in times.keys():\n if mode in [\"fc\", \"ufc\"] and is_robot:\n continue\n\n match = re.fullmatch(r\"instance(\\d+?)-(\\d+?).lp\", examples)\n size, num = match.group(1), match.group(2)\n\n result, model, time = run_instance(\n milclass,\n mode,\n is_robot,\n os.path.join(instances, examples),\n 60,\n )\n\n data.append(\n {\n \"mode\": mode,\n \"name\": name,\n \"size\": int(size),\n \"num\": int(num),\n \"time\": time,\n \"result\": str(result),\n }\n )\n pd.DataFrame(data).to_csv(\"times.csv\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"potassco/clingomil","sub_path":"run_benchmark.py","file_name":"run_benchmark.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1551310827","text":"def AMSGetIndex(item_list, value):\n\n if value == \"True\":\n value = True\n elif value == \"False\":\n value = False\n\n item_list = dict(item_list)\n index_number = None\n for i in item_list.keys():\n if i == value:\n index_number = list(item_list.keys()).index(value)\n\n # setattr(self.parent_class, self.index, self.ddl_value_index)\n return index_number","repo_name":"amanoli11/ams","sub_path":"libraries/amsgetindex.py","file_name":"amsgetindex.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72084652007","text":"import json\r\n\r\n# Load the JSON data from the file\r\nwith open(\"sample-data.json\") as file:\r\n data = json.load(file)\r\n\r\n# Extract the interface status information\r\ninterface_status = data[\"imdata\"]\r\n\r\n# Print the header\r\nprint(\"Interface Status\")\r\nprint(\"=\" * 80)\r\nprint(\"{:<50}{:<20}{:<10}{:<6}\".format(\"DN\", \"Description\", \"Speed\", \"MTU\"))\r\nprint(\"-\" * 80)\r\n\r\n# Iterate over the interface status data and print the information\r\nfor item in interface_status:\r\n attributes = item[\"l1PhysIf\"][\"attributes\"]\r\n dn = attributes[\"dn\"]\r\n description = attributes[\"descr\"]\r\n speed = attributes[\"speed\"]\r\n mtu = attributes[\"mtu\"]\r\n print(\"{:<50}{:<20}{:<10}{:<6}\".format(dn, description, speed, mtu))","repo_name":"mirasmbsr/lab6","sub_path":"json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11242902116","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ndata_set = pd.read_csv('Lab 5/unclean-wine-quality.csv')\r\n\r\n# Question 1\r\n#drop first column\r\ndata_set = data_set.drop(data_set.columns[0], axis=1)\r\n\r\nlabels = data_set.iloc[:,11]\r\ndata = data_set.iloc[:,0:11]\r\n\r\n# use np.where to and sum to count number of - and NaN values\r\nprint('\\nQuestion 1: \\n')\r\nprint('number of - values: ', np.sum(np.where(data == '-', 1, 0)))\r\nprint ('Number of NaN values: ', np.sum(np.where(data.isnull(), 1, 0)))\r\n\r\n# Print the indicies of the - values in the dataset\r\nprint('Indicies of - values: \\n', np.where(data == '-'))\r\nprint ('Indicies of NaN values: \\n', np.where(data.isnull()))\r\n\r\n# Change all - values to NaN\r\nprint('\\nReplacing all - values with NaN')\r\ndata = data.replace('-', np.nan)\r\n\r\n# Print number of null values in the dataset\r\nprint ('Number of NaN values: ', np.sum(np.where(data.isnull(), 1, 0)))\r\n\r\n# Change all values in the data set to float64\r\ndata = data.astype('float64')\r\n\r\n# Question 2\r\n# Filling missing values with a constant value\r\nprint ('\\nQuestion 2: \\n')\r\ndata2 = data\r\ndata2 = data2.fillna({'fixed acidity': 0})\r\ndata2 = data2.fillna({'volatile acidity': 0})\r\ndata2 = data2.fillna({'citric acid': 0})\r\ndata2 = data2.fillna({'residual sugar': 0})\r\ndata2 = data2.fillna({'chlorides': 1})\r\ndata2 = data2.fillna({'free sulfur dioxide': 0})\r\ndata2 = data2.fillna({'total sulfur dioxide': 0})\r\ndata2 = data2.fillna({'density': 0})\r\ndata2 = data2.fillna({'pH': 1})\r\ndata2 = data2.fillna({'sulphates': 1})\r\ndata2 = data2.fillna({'alcohol': 0})\r\n\r\nprint ('Number of NaN values after replacement: ', np.sum(np.where(data2.isnull(), 1, 0)))\r\n\r\n# Question 3\r\nprint('\\nQuestion 3: \\n')\r\ndata3 = data.fillna(method='ffill')\r\nprint('Sample-and-hold filling: \\n', data3.iloc[16:19,0])\r\n\r\n# Question 4\r\nprint('\\nQuestion 4: \\n')\r\ndata4 = data.interpolate(method='linear')\r\nprint('Linear interpolation: \\n', data4.iloc[16:19,0])\r\n\r\n# Question 5\r\nprint('='*100)\r\nprint('\\nQuestion 5: \\n')\r\nnoisy_data = pd.read_csv('Lab 5/noisy-sine.csv')\r\n# apply an moving average filter on the noisy_data with window size 5, 31 and 51\r\n# then plot the original noisy_data along with result of the three moving average filters\r\n\r\n# window size 5 \r\nnoisy_data5 = noisy_data.rolling(window=5).mean()\r\n# window size 31\r\nnoisy_data31 = noisy_data.rolling(window=31).mean()\r\n# window size 51\r\nnoisy_data51 = noisy_data.rolling(window=51).mean()\r\n\r\n# plot the original noisy_data along with result of the three moving average filters\r\nplt.plot(noisy_data, label='noisy-sine')\r\nplt.plot(noisy_data5, label='moving_average_5')\r\nplt.plot(noisy_data31, label='moving_average_31')\r\nplt.plot(noisy_data51, label='moving_average_51')\r\nplt.legend()\r\nplt.show()\r\n\r\n#Question 6\r\n# i)\r\ndataset = pd.read_csv(\"C:\\\\Users\\\\miles\\\\OneDrive - Queen's University\\\\Eng Year 3 - 2022-2023\\\\Sem 2\\\\ELEC 390\\\\Lab5 - Pre-Processing\\\\ECG-sample.csv\", on_bad_lines='skip', header = None)\r\n\r\nfig, ax = plt.subplots()\r\ndataset.iloc[:].plot(ax=ax, linewidth=3)\r\n\r\nax.set_title('ECG Sample', fontsize=15)\r\nax.set_xlabel('Number of the window')\r\nax.set_ylabel('Value of the std')\r\nax.set_ylim(-0.005,0.31)\r\nplt.show()\r\n\r\n# ii)\r\nfeatures = pd.DataFrame(columns=['mean', 'std', 'max', 'min'])\r\nwindow_size = 
31\r\nfeatures['mean'] = dataset.iloc[:].rolling(window=window_size).mean()\r\nfeatures['std'] = dataset.iloc[:].rolling(window=window_size).std()\r\nfeatures['max'] = dataset.iloc[:].rolling(window=window_size).max()\r\nfeatures['min'] = dataset.iloc[:].rolling(window=window_size).min()\r\nfeatures = features.dropna()\r\nprint(features)\r\n\r\n# iii)\r\nfeatures['std'].plot()\r\nplt.show()\r\n","repo_name":"miodrag4/ELEC-390-Applied-Data-Science","sub_path":"Lab5 - Pre-Processing/Lab5.py","file_name":"Lab5.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10455162448","text":"\"\"\"\n一些排序算法\n\"\"\"\n\ndef bubble_sort_v1(data: []):\n \"\"\"\n 原始的冒泡排序算法\n \"\"\"\n if len(data)==0:\n print(\"空数组!!!\")\n return\n # 总共需要排序轮次\n for i in range(len(data)-1):\n # 每轮次需要比较的次数\n for j in range(len(data)-1-i):\n if data[j]>data[j+1] :\n tmp = data[j]\n data[j] = data[j+1]\n data[j+1] = tmp\n\ndef bubble_sort_v2(data : []):\n \"\"\"\n 改进的冒泡排序算法\n 改进点:若已经有序,则提前终止,减少无用的循环次数\n \"\"\"\n if len(data)==0:\n print(\"空数组!!!\")\n return\n for i in range(len(data)-1):\n # 先将flag置位\n is_sorted = True\n for j in range(len(data)-1-i):\n if data[j]>data[j+1]:\n tmp = data[j]\n data[j] = data[j+1]\n data[j+1] = tmp\n # 发生了交换,说明还不能肯定完全有序\n is_sorted = False\n # 如上一轮次没有发生交换,则说明数组已经有序\n if is_sorted:\n break\n\ndef bubble_sort_v3(data : []):\n \"\"\"\n 改进的冒泡排序算法\n 改进点1:若数组已经有序,则提前终止\n 改进点2:记录最后一次元素交换的位置,作为无序区边界\n \"\"\"\n if len(data)==0:\n print(\"空数组!!!\")\n return\n # 无序区边界\n set_border = len(data)-1\n # 记录最后一次交换位置\n last_swap_index = 0\n for _ in range(len(data)-1):\n # 先将flag置位\n is_sorted = True\n for j in range(set_border):\n if data[j]>data[j+1]:\n tmp = data[j]\n data[j] = data[j+1]\n data[j+1] = tmp\n # 发生了交换,说明还不能肯定完全有序\n is_sorted = False\n last_swap_index = j\n # 设置无序区边界\n set_border = last_swap_index\n if is_sorted:\n break\n\ndef cock_tail_sort(data : []):\n \"\"\"\n 鸡尾酒排序(秘诀就是摇晃)\n \"\"\"\n if len(data)==0:\n print(\"空数组!!!\")\n return\n for i in range(len(data)//2):\n # 有序标记 初始为True\n # 偶数轮次(0 开始)\n is_sorted = True\n for j in range(i,len(data)-1-i):\n if data[j]>data[j+1]:\n tmp = data[j]\n data[j] = data[j+1]\n data[j+1] = tmp\n is_sorted = False\n if is_sorted:\n break\n # 奇数轮次(1 开始)\n is_sorted = True\n # 这边较原始代码改进了一点,就是在偶数轮次结束后,无序区长度应该减少了1,所以这里减2\n for j in range(len(data)-2-i,i,-1):\n if data[j]=end_index:\n return\n # 定位基准元素放置位置\n pivot_index = partition_v2(start_index,end_index,array)\n # 分治\n quick_sort(start_index,pivot_index-1,array)\n quick_sort(pivot_index+1,end_index,array)\n\ndef quick_sort_stack(start_index: int,end_index: int,array: []):\n \"\"\"\n 快速排序(栈实现)\n \"\"\"\n # ��建一个栈,保存递归调用函数的参数\n stack = []\n # 整个数组的起始和终止下标,以key-value形式入栈\n root_param = {\"start_index\":start_index, \"end_index\":end_index}\n stack.append(root_param)\n # 循环结束条件: 栈为空时结束\n while len(stack)>0:\n # 栈顶元素出栈,得到起始和终止下标\n param = stack.pop()\n pivot_index = partition_v2(param.get(\"start_index\"),param.get(\"end_index\"),array)\n # 根据基准元素位置,把剩下数组划分为两部分,并将每部分的起止下标入栈\n if param.get(\"start_index\")pivot_index+1:\n right_param = {\"start_index\":pivot_index+1, \"end_index\":param.get(\"end_index\")}\n stack.append(right_param)\n\ndef partition_v1(start_index: int,end_index: int,array: []):\n \"\"\"\n 双边循环法,定位基准元素放置位置(快排)\n \"\"\"\n # 选择数组首元素作为基准元素\n pivot = array[start_index]\n left = start_index # 左指针\n right = end_index # 右指针\n while(left != right):\n # 先尝试移动右指针\n while leftpivot:\n right-=1\n # 尝试移动左指针\n while leftarray[index*2+2] else index*2+2\n if array[index] 0.0\n \n new_pres = torch.clamp(depth_preds[fg_mask],1e-5,1.0-(1e-5))\n\n with autocast(enabled=False):\n depth_loss = (F.binary_cross_entropy(\n new_pres,\n # depth_preds[fg_mask],\n depth_labels[fg_mask],\n reduction='none',\n ).sum()\n / max(1.0, fg_mask.sum())\n )\n\n output = OrderedDict()\n output[\"singletask_bev_loss\"] = [\n bev_depth_loss_coeff * depth_loss + detection_loss\n ]\n return output\n\n def get_downsampled_gt_depth(self, gt_depths):\n \"\"\"\n Input:\n gt_depths: [B, N, H, W]\n Output:\n gt_depths: [B*N*h*w, d]\n \"\"\"\n B, N, H, W = gt_depths.shape\n gt_depths = 
gt_depths.view(\n B * N,\n H // self.downsample_factor,\n self.downsample_factor,\n W // self.downsample_factor,\n self.downsample_factor,\n 1,\n )\n gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous()\n gt_depths = gt_depths.view(\n -1, self.downsample_factor * self.downsample_factor\n )\n gt_depths_tmp = torch.where(\n gt_depths == 0.0, 1e5 * torch.ones_like(gt_depths), gt_depths\n )\n gt_depths = torch.min(gt_depths_tmp, dim=-1).values\n gt_depths = gt_depths.view(\n B * N, H // self.downsample_factor, W // self.downsample_factor\n )\n\n gt_depths = (\n gt_depths - (self.dbound[0] - self.dbound[2])\n ) / self.dbound[2]\n gt_depths = torch.where(\n (gt_depths < self.depth_channels + 1) & (gt_depths >= 0.0),\n gt_depths,\n torch.zeros_like(gt_depths),\n )\n gt_depths = F.one_hot(\n gt_depths.long(), num_classes=self.depth_channels + 1\n ).view(-1, self.depth_channels + 1)[:, 1:]\n\n return gt_depths.float()\n","repo_name":"xingyun-xy/cap","sub_path":"cap/models/structures/bev_matrixvt.py","file_name":"bev_matrixvt.py","file_ext":"py","file_size_in_byte":8654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19208078132","text":"from ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.ofproto import ofproto_v1_3\nfrom ryu.lib.packet import packet\nfrom ryu.lib.packet import ethernet\nfrom ryu.lib.packet import ether_types\nfrom ryu.lib.packet import ipv4\nfrom ryu.lib.packet import tcp\nfrom ryu.lib.packet import udp\nimport random\nimport pdb\nimport time\nimport threading\n\nfrom auto_gym import AutoEnv\n\nclass SimpleSwitch13(app_manager.RyuApp):\n OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]\n\n def __init__(self, *args, **kwargs):\n super(SimpleSwitch13, self).__init__(*args, **kwargs)\n self.mac_to_port = {}\n \n # format: {(srcip,dstip,srcport,dstport,proto):[active,priority,fct,flow size]}\n self.flow_set = {}\n self.flow_old = {}\n # format: {(srcip, dstip, srcport, dstport, proto) : n}\n # n added by 1 when packet not coming,\n # if n > N, then flow finished.\n self.timeout = {}\n self.TIMEOUT_COUNT = 10\n conf = {\"flow\": self.flow_set,\n \"old_flow\": self.flow_old,\n \"index_len\": 5,\n \"val_len\": 4\n }\n # self.env = AutoEnv(conf)\n\n @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)\n def switch_features_handler(self, ev):\n datapath = ev.msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n # install table-miss flow entry\n #\n # We specify NO BUFFER to max_len of the output action due to\n # OVS bug. At this moment, if we specify a lesser number, e.g.,\n # 128, OVS will send Packet-In with invalid buffer_id and\n # truncated packet data. In that case, we cannot output packets\n # correctly. The bug has been fixed in OVS v2.1.0.\n match = parser.OFPMatch()\n actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,\n ofproto.OFPCML_NO_BUFFER)]\n self.add_flow(datapath, 0, match, actions)\n self.hard_coding_routing(ev.msg.datapath)\n stat_t = threading.Thread(target=self.period_flow_stats_send, args=(ev,))\n stat_t.start()\n print(\"INFO1:\", datapath,parser, ofproto)\n\n def add_flow(self, datapath, priority, match, actions, buffer_id=None, meter_id=0, idle_timeout=0):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n if meter_id:\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,actions), parser.OFPInstructionMeter(1, ofproto.OFPIT_METER)]\n else:\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n actions)]\n if buffer_id:\n mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id, command=ofproto.OFPFC_ADD, flags=ofproto.OFPFF_SEND_FLOW_REM,\n priority=priority, match=match,\n instructions=inst, idle_timeout=idle_timeout)\n else:\n mod = parser.OFPFlowMod(datapath=datapath, priority=priority, command=ofproto.OFPFC_ADD, flags=ofproto.OFPFF_SEND_FLOW_REM,\n match=match, instructions=inst, idle_timeout=idle_timeout)\n datapath.send_msg(mod)\n\n def add_meter(self, datapath):\n parser = datapath.ofproto_parser\n ofproto = datapath.ofproto\n bands = [parser.OFPMeterBandDrop(type_=ofproto.OFPMBT_DROP, len_=0, rate=1000, burst_size=100)]\n req = parser.OFPMeterMod(datapath=datapath, command=ofproto.OFPMC_ADD, flags=ofproto.OFPMF_KBPS, meter_id=1, bands=bands)\n datapath.send_msg(req)\n pass\n\n def period_flow_stats_send(self, ev):\n print(\"Sending periodically\")\n datapath = ev.msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n match = parser.OFPMatch()\n while True:\n req = 
parser.OFPFlowStatsRequest(datapath, 0, ofproto.OFPTT_ALL,\n ofproto.OFPP_ANY, ofproto.OFPG_ANY, match=match)\n datapath.send_msg(req)\n time.sleep(10)\n\n @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)\n def period_flow_stats_reply(self, ev):\n flows = []\n for stat in ev.msg.body:\n flows.append(stat)\n print(\"Flows: \", flows)\n\n def interact_with_agent(self):\n pass\n\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def _packet_in_handler(self, ev):\n # If you hit this you might want to increase\n # the \"miss_send_length\" of your switch\n if ev.msg.msg_len < ev.msg.total_len:\n self.logger.debug(\"packet truncated: only %s of %s bytes\",\n ev.msg.msg_len, ev.msg.total_len)\n\n msg = ev.msg\n datapath = msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n in_port = msg.match['in_port']\n\n pkt = packet.Packet(msg.data)\n eth = pkt.get_protocols(ethernet.ethernet)[0]\n\n print(ether_types.ETH_TYPE_IP, \"Come a packet: \", pkt.get_protocols(ethernet.ethernet))\n return \n\n if eth.ethertype == ether_types.ETH_TYPE_LLDP:\n # ignore lldp packet\n return\n dst = eth.dst\n src = eth.src\n\n out_queue = 1\n # if pkt.get_protocols(ipv4.ipv4):\n # # count the statistics of flow\n # ip = pkt.get_protocols(ipv4.ipv4)[0]\n # if ip.proto == 6 or ip.proto == 17:\n # if ip.proto == 6:\n # tcpudp = pkt.get_protocols(tcp.tcp)[0]\n # if ip.proto == 17:\n # tcpudp = pkt.get_protocols(udp.udp)[0]\n # index = (ip.src, ip.dst, tcpudp.src_port, tcpudp.dst_port, ip.proto)\n # print(index)\n # # update timeout count\n # self.timeout[index] = 0\n # if index in self.flow_set:\n # out_queue = self.flow_set[index][1]\n # self.flow_set[index][2] += 1\n # self.flow_set[index][3] += ip.total_length\n # else:\n # self.flow_set[index] = [True, random.randint(0,3), 1, ip.total_length]\n # for idx in self.timeout:\n # self.timeout[idx] += 1\n # if self.timeout[idx] > self.TIMEOUT_COUNT:\n # del self.timeout[idx]\n # self.flow_set[idx][0] = False\n # print(pkt.protocols)\n # print(self.flow_set)\n # print(self.timeout)\n\n\n dpid = datapath.id\n self.mac_to_port.setdefault(dpid, {})\n\n # log infomation\n # self.logger.info(\"packet in %s %s %s %s\", dpid, src, dst, in_port)\n\n # learn a mac address to avoid FLOOD next time.\n self.mac_to_port[dpid][src] = in_port\n\n if dst in self.mac_to_port[dpid]:\n out_port = self.mac_to_port[dpid][dst]\n else:\n out_port = ofproto.OFPP_FLOOD\n\n queue_id = out_queue\n actions = []\n actions.append(parser.OFPActionSetQueue(queue_id))\n actions.append(parser.OFPActionOutput(out_port))\n\n # install a flow to avoid packet_in next time\n if out_port != ofproto.OFPP_FLOOD:\n match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)\n # verify if we have a valid buffer_id, if yes avoid to send both\n # flow_mod & packet_out\n if msg.buffer_id != ofproto.OFP_NO_BUFFER:\n self.add_flow(datapath, 1, match, actions, msg.buffer_id, idle_timeout=60)\n print(\"I am here\")\n return\n else:\n self.add_flow(datapath, 1, match, actions, idle_timeout=60)\n print(\"I am here2\")\n data = None\n if msg.buffer_id == ofproto.OFP_NO_BUFFER:\n data = msg.data\n\n out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,\n in_port=in_port, actions=actions, data=data)\n datapath.send_msg(out)\n\n def hard_coding_routing(self, datapath):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n # bands = [parser.OFPMeterBandDrop(type_=ofproto.OFPMBT_DROP, len_=0, rate=1000, burst_size=100)]\n # bands = 
[parser.OFPMeterBandDrop(rate=1000, burst_size=100)]\n # req = parser.OFPMeterMod(datapath=datapath, command=ofproto.OFPMC_ADD, flags=ofproto.OFPMF_KBPS, meter_id=1, bands=bands)\n # print(\"REQ: \", req)\n # datapath.send_msg(req)\n\n buffer_id = None\n meter_id = 0\n priority = 1\n # actions = [parser.OFPActionSetQueue(queue_id),parser.OFPActionOutput(2)]\n actions = [parser.OFPActionOutput(2)]\n match = parser.OFPMatch(in_port=1)\n\n # print(match, actions)\n if meter_id:\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,actions), parser.OFPInstructionMeter(1)]\n else:\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n actions)]\n if buffer_id:\n mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,\n priority=priority, match=match,\n instructions=inst)\n else:\n mod = parser.OFPFlowMod(datapath=datapath, priority=priority,\n match=match, instructions=inst)\n datapath.send_msg(mod)\n\n meter_id = 0\n actions = [parser.OFPActionOutput(1)]\n match = parser.OFPMatch(in_port=2)\n # print(match, actions)\n if meter_id:\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,actions), parser.OFPInstructionMeter(1)]\n else:\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n actions)]\n if buffer_id:\n mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,\n priority=priority, match=match,\n instructions=inst)\n else:\n mod = parser.OFPFlowMod(datapath=datapath, priority=priority,\n match=match, instructions=inst)\n datapath.send_msg(mod)","repo_name":"lab821/SDN-DRL","sub_path":"demo/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":10513,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"14086291230","text":"n = input()\nl = list(map(int,raw_input().split()))\nm = 0\nmi = n-1\nfor i in range(n):\n\tif l[i]> l[m]:\n\t\tm=i\n\tif l[i]<=l[mi]:\n\t\tmi = i\nif mi>m:\n\tprint (n+m-mi-1)\nelse:\n\tprint (n+m-mi-2)","repo_name":"aniket0702/Codeforces-Solution","sub_path":"arrival_of_the_general.py","file_name":"arrival_of_the_general.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"34716525310","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom oilserver.models import TestCaseInstanceStatus\n\ndef add_skipped_testcaseinstancestatus(apps, schema_editor):\n ''' Add the \"skipped\" status to the testcaseinstancestatus.'''\n skipped = TestCaseInstanceStatus(name='skipped',\n description='The build or test was skipped.')\n skipped.save()\n return skipped\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('oilserver', '0029_auto_20160614_1636'),\n ]\n operations = [\n migrations.RunPython(\n add_skipped_testcaseinstancestatus\n ),\n ]\n","repo_name":"autonomouse/dashboard","sub_path":"weebl/oilserver/migrations/0030_add_skipped_testcaseinstancestatus.py","file_name":"0030_add_skipped_testcaseinstancestatus.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3734622470","text":"import argparse\n\nfrom trainer import Trainer\nfrom utils import set_deterministic\n\n\ndef main(parser):\n args = parser.parse_args()\n set_deterministic()\n\n trainer = Trainer(\n model_path=args.model_path,\n epochs=args.epochs,\n train_dir=args.train_dir,\n batch_size=args.batch_size,\n use_wandb=args.use_wandb,\n )\n\n trainer.run()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--batch-size\", type=int, default=16)\n parser.add_argument(\"--model-path\", type=str,\n default=\"/Users/adilkhansarsen/Documents/work/AudioMNIST/checkpoints/checkpoint.pth\")\n parser.add_argument(\"--epochs\", type=int, default=100)\n parser.add_argument(\"--train-dir\", type=str, default=\"data/train.csv\")\n parser.add_argument(\"--use-wandb\", type=bool, default=False)\n main(parser)\n","repo_name":"adolkhan/AudioMNIST","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"13760510709","text":"import pandas as pd\nimport numpy as np\nimport time\nimport joblib\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom matplotlib import pyplot as plt\n\n#See deck.py\nclass Card:\n def __init__(self, card):\n self.suit = card[0]\n self.value = int(card[1:])\n\n def order(self, trump_suit):\n if self.suit == trump_suit:\n return [8, 9, 14, 12, 15, 10, 11, 13][self.value - 7]\n return [0, 1, 2, 6, 3, 4, 5, 7][self.value - 7]\n\n def points(self, trump_suit):\n if self.suit == trump_suit:\n return [0, 0, 14, 10, 20, 3, 4, 11][self.value - 7]\n return [0, 0, 0, 10, 2, 3, 4, 11][self.value - 7]\n\nsuits = ['k', 'h', 'r', 's']\nvalues = [7, 8, 9, 10, 11, 12, 13, 14]\npossible_street = []\nfor suit in suits:\n for idx in range(7,13):\n cards = {suit + str(idx), suit + str(idx + 1), suit + str(idx + 2)}\n possible_street.append(cards) \n\ndef transform_cards(cards):\n #Creat class instance of each card\n if type(cards) == float:\n return cards\n transformed_cards = []\n for card in cards:\n transformed_cards.append(Card(card))\n return transformed_cards\n\ndef transform_back(cards):\n #Create normal cards from class instances\n transformed_cards = []\n if type(cards) == float:\n return cards\n try:\n for card in cards:\n transformed_cards.append(card.suit + str(card.value))\n return transformed_cards\n except:\n return [cards.suit + str(cards.value)]\n\ndef get_asked_suit(center, trump):\n #Return the asked suit based on the first card of center.\n if type(center) == float:\n return np.nan\n if center[0].suit == trump:\n return '1'\n return '2'\n\ndef player_has_trump(hascolor, suit):\n #Checks whether a player still has trump suit\n suit_index = suits.index(suit)\n return int(hascolor[suit_index])\n\ndef len_center(center):\n #Returns the length of the center\n if type(center) == float:\n return 0\n return len(center)\n\ndef get_playable_cards(cards, bits):\n #Returns the cards that are allowed to be played.\n playable_cards = []\n for index, bit in enumerate(bits[0]):\n if bit == '1':\n playable_cards.append(cards[index])\n return playable_cards\n\ndef check_has_higher(center, cards, trump):\n #Returns whether there is a card that is higher than the cards in center and the card that are higher.\n has_higher = 0\n higher_cards = []\n if type(center) != float:\n highest = center[0]\n leading_suit = center[0].suit\n for card in center:\n if (card.order(trump) > highest.order(trump) and\n (card.suit == leading_suit or\n card.suit == trump)):\n highest = card\n highest_value = highest.order(trump)\n for card in cards:\n if (card.order(trump) > highest_value and\n (card.suit == leading_suit or \n card.suit == trump)):\n has_higher = 1\n higher_cards.append(card)\n return has_higher, higher_cards\n else:\n return 1, cards \n \ndef teammate_winning(center, trump, center_length):\n #Checks whether teammate is winning\n leading_suit = center[0].suit\n highest_card = center[0]\n for card in center:\n if (card.order(trump) > highest_card.order(trump) and\n (card.suit == leading_suit or \n card.suit == trump)):\n highest_card = card\n card_index = center.index(highest_card)\n\n if center_length == 2 and card_index == 0:\n return 1\n elif center_length == 3 and card_index == 1:\n return 1\n return 0\n\ndef get_highest_cards(cardsplayed0, cardsplayed1, cardsplayed2, cardsplayed3):\n #Get for each suit the two highest cards that are still in game\n played_cards = 
get_played_cards(cardsplayed0, cardsplayed1, cardsplayed2, cardsplayed3)\n highest_cards = []\n for i in range(4):\n cards_played = [card.value for card in played_cards[i]]\n cards_left = [x for x in range(7,15) if x not in cards_played]\n if len(cards_left) > 1:\n highest_cards.append([cards_left[-1], cards_left[-2]])\n elif len(cards_left) > 0:\n highest_cards.append([cards_left[-1]])\n else:\n highest_cards.append([])\n return highest_cards\n\ndef get_played_cards(cardsplayed0, cardsplayed1, cardsplayed2, cardsplayed3):\n #Creates a list of lists of all cards that are played sorted by suit.\n dictionary = {'cardsplayed0': cardsplayed0, 'cardsplayed1': cardsplayed1, 'cardsplayed2': cardsplayed2, 'cardsplayed3': cardsplayed3}\n played_cards = [[],[],[],[]]\n for i in range(4):\n if type(dictionary['cardsplayed{}'.format(i)]) != float:\n for card in dictionary['cardsplayed{}'.format(i)]:\n index = suits.index(card.suit)\n played_cards[index].append(card)\n return played_cards\n\ndef check_has_card(cards, center, highest_cards, order):\n #Checks whether player has the highest (order = 0) or second highest card (order = 1).\n if len_center(center) > 0:\n suit = center[0].suit\n index = suits.index(suit)\n #The second highest card sometimes does not exist.\n try:\n for card in cards:\n if card.value == highest_cards[index][order] and card.suit == suit:\n return 1\n return 0\n except:\n return 0\n else:\n return np.nan\n\ndef check_can_create_street(center, cards):\n #Gets cards that can creeate a street\n cards_to_create_street = []\n for card in cards:\n possible_street = check_possible_street(center, card)\n if possible_street:\n cards_to_create_street.append(card)\n return cards_to_create_street\n\ndef check_possible_street(center, card):\n #Checks whether there is a street in the combination of center with card.\n cards = []\n for center_card in center:\n cards.append(center_card.suit + str(center_card.value))\n cards.append(card.suit + str(card.value))\n for meld in possible_street:\n if meld.issubset(set(cards)):\n return 1\n return 0\n\ndef still_in_game(center, highest_cards, has_highest):\n #Checks whether the highest card of a suit in still in the game and not in the center\n if has_highest == 1:\n return 0\n if len_center(center) == 0:\n return 1\n try:\n index = suits.index(center[0].suit)\n for card in center:\n if card.suit == center[0].suit:\n if card.value == highest_cards[index][0]:\n return 0\n return 1\n except:\n return 1\n\ndef player_has_suit(len_center, center, hascolor):\n #Checks whether the player has a asked suit\n if len_center == 0:\n return np.nan\n suit_index = suits.index(center[0].suit)\n return int(hascolor[suit_index])\n\ndef check_has_card_empty_center(card, highest_cards, order):\n #Checks whether the player has the highest or second highest card given a empty center.\n index = suits.index(card.suit)\n try:\n if card.value == highest_cards[index][order]:\n return 1\n return 0\n except:\n return 0\n \ndef check_is_card(card, highest_cards, order):\n #Checks whether this card is the highest or second highest card\n try:\n index_suit = suits.index(card.suit)\n if card.value == highest_cards[index_suit][order]:\n return 1\n return 0\n except:\n return 0\n\ndef get_lowest_value(cards, trump):\n #Get lowest value of all cards in hand\n lowest_value = 21\n for card in cards:\n if card.points(trump) < lowest_value:\n lowest_value = card.points(trump)\n return lowest_value\n\ndef get_highest_value(cards, trump):\n #Get highest value of all cards in hand\n 
highest_value = 0\n for card in cards:\n if card.points(trump) > highest_value:\n highest_value = card.points(trump)\n return highest_value\n\ndef get_player_suit(center, card, trump):\n #Returns the suit of the current card\n if card.suit == trump:\n return '2'\n if len_center(center) > 0:\n if center[0].suit == card.suit:\n return '1'\n return '0'\n\ndef player_has_suit_no_center(card, hascolor):\n #Checks whether an opponent has a certain suit given the suit of the card\n suit_index = suits.index(card.suit)\n return int(hascolor[suit_index])\n\ndef get_second_element(row):\n return row[1]\n\ndef current_player_has_suit(cards, suit):\n for card in cards:\n if card.suit == suit:\n return 1\n return 0\n\ndef get_suit_center(center, trump):\n dictionary = {'card1' : np.nan, 'card2' : np.nan}\n for i in range(1, len(center)):\n if center[i].suit == center[0].suit:\n dictionary['card{}'.format(i)] = '1'\n elif center[i].suit == trump:\n dictionary['card{}'.format(i)] = '2'\n else:\n dictionary['card{}'.format(i)] = '0'\n return [dictionary['card1'], dictionary['card2']]\n\nstart_time = time.time()\n\nprint('start')\n\nplay_data = pd.read_csv('Play.csv', usecols=['Cards', 'Center', 'CardsPlayed0', 'CardsPlayed1', 'CardsPlayed2', 'CardsPlayed3', 'HasColor0', 'HasColor1', 'HasColor2', 'HasColor3', 'PlayCard', 'Troef', 'Variant', 'PlayableCardBits'], dtype={'HasColor0' : str, 'HasColor1' : str, 'HasColor2' : str, 'HasColor3' : str})\n\n#Filter out Rotterdam Version\nrotterdam_data = play_data[play_data['Variant'] == 'Rotterdams'].loc[:, ['Cards', 'Center', 'CardsPlayed0', 'CardsPlayed1', 'CardsPlayed2', 'CardsPlayed3', 'HasColor0', 'HasColor1', 'HasColor2', 'HasColor3', 'Troef', 'PlayableCardBits', 'PlayCard' ]].reset_index(drop=True)\n\ndel play_data\n\n#Save the original hand for debugging purposes\nrotterdam_data['Original_hand'] = rotterdam_data['Cards']\n\n#Create classes from given cards\nrotterdam_data['Cards'] = rotterdam_data['Cards'].str.replace('.', '', regex=True).str.split().map(lambda x: transform_cards(x))\nrotterdam_data['Center'] = rotterdam_data['Center'].str.replace('.', '', regex=True).str.split().map(lambda x: transform_cards(x))\nfor i in range(4):\n rotterdam_data['CardsPlayed{}'.format(i)] = rotterdam_data['CardsPlayed{}'.format(i)].str.split().map(lambda x: transform_cards(x))\n\n#Filter not already played cards and split it into a list\nrotterdam_data['PlayableCardBits'] = rotterdam_data['PlayableCardBits'].str.replace('.', '', regex=True).str.split()\n\nprint('1')\n#Filter out only playable cards\nrotterdam_data['Cards'] = rotterdam_data.apply(lambda x: get_playable_cards(x['Cards'], x['PlayableCardBits']), axis = 1)\n\n#Filter out suit of Trump\nrotterdam_data['Troef'] = rotterdam_data['Troef'].str[0]\n\n#Get the suit of the first played card. 0 if no card in center. 1 if suit is equal to trump, 2 otherwise.\nrotterdam_data['Asked_suit'] = rotterdam_data.apply(lambda x: get_asked_suit(x['Center'], x['Troef']), axis = 1)\nprint('2')\n\n#Get number of playable cards\nrotterdam_data['No_playable_cards'] = rotterdam_data.apply(lambda x: len(x['Cards']), axis = 1)\n\n#Get length of center. 
Used as input for other functions\nrotterdam_data['len_center'] = rotterdam_data.apply(lambda x: len_center(x['Center']), axis = 1)\n\n#Get player turn\nrotterdam_data['Player_turn'] = rotterdam_data['len_center'].astype(str)\nprint('3')\n\n#Check whether teammate has played\nrotterdam_data['Teammate_played'] = rotterdam_data.apply(lambda x: 1 if x['len_center'] > 1 else 0, axis = 1)\n\n#Check whether opponnents could still have a trump card\nfor i in range(1,4):\n rotterdam_data['Player{}_has_trump'.format(i)] = rotterdam_data.apply(lambda x: player_has_trump(x['HasColor{}'.format(i)], x['Troef']), axis = 1)\n\n#Check whether current player can play a card which is higher than th current played cards\nrotterdam_data['Has_higher'] = rotterdam_data.apply(lambda x: check_has_higher(x['Center'], x['Cards'], x['Troef'])[0], axis = 1)\nprint('4')\n\n#Get the cards that are higher\nrotterdam_data['Higher_cards'] = rotterdam_data.apply(lambda x: check_has_higher(x['Center'], x['Cards'], x['Troef'])[1], axis = 1)\n\n#Check whether teammate is winning\nrotterdam_data['Teammate_winning'] = rotterdam_data.apply(lambda x: teammate_winning(x['Center'], x['Troef'], x['len_center']) if x['Teammate_played'] == 1 else 0, axis = 1)\nprint('5')\n\n#Get the two highest cards of each suit still in game\nrotterdam_data['Highest_cards'] = rotterdam_data.apply(lambda x: get_highest_cards(x['CardsPlayed0'], x['CardsPlayed1'], x['CardsPlayed2'], x['CardsPlayed3']), axis = 1)\n\n#Check whether current player has one of the highest cards of the asked suit\nrotterdam_data['Has_highest_suit'] = rotterdam_data.apply(lambda x: check_has_card(x['Cards'], x['Center'], x['Highest_cards'], 0), axis = 1)\nrotterdam_data['Has_second_highest_suit'] = rotterdam_data.apply(lambda x: check_has_card(x['Cards'], x['Center'], x['Highest_cards'], 1), axis = 1)\nprint('6')\n\n#Check whether the highest card is still in game or already played in the center\nrotterdam_data['Still_in_game'] = rotterdam_data.apply(lambda x: still_in_game(x['Center'], x['Highest_cards'], x['Has_highest_suit']), axis = 1)\n\n#Check whether with the current center and cards in hand a street can be made and get those cards\nrotterdam_data['Cards_to_create_street'] = rotterdam_data.apply(lambda x: check_can_create_street(x['Center'], x['Cards']) if x['len_center'] > 1 else [], axis = 1)\nrotterdam_data['Can_create_street'] = rotterdam_data.apply(lambda x: 1 if int(len(x['Cards_to_create_street']) > 0) else 0, axis = 1)\nprint('7')\n\n#Get lowest and highest value of cards in the hand of current player\nrotterdam_data['Lowest_value'] = rotterdam_data.apply(lambda x: get_lowest_value(x['Cards'], x['Troef']), axis = 1)\nrotterdam_data['Highest_value'] = rotterdam_data.apply(lambda x: get_highest_value(x['Cards'], x['Troef']), axis = 1)\n\nrotterdam_data['Has_trump'] = rotterdam_data.apply(lambda x: current_player_has_suit(x['Cards'], x['Troef']), axis = 1)\nrotterdam_data['Has_suit'] = rotterdam_data.apply(lambda x: current_player_has_suit(x['Cards'], x['Center'][0].suit) if x['Asked_suit'] == '1' or x['Asked_suit'] == '2' else 1, axis = 1)\n \nrotterdam_data['Suit_cards'] = rotterdam_data.apply(lambda x: get_suit_center(x['Center'], x['Troef']) if x['len_center'] != 0 else [np.nan, np.nan], axis = 1)\nrotterdam_data[['Suit_card2', 'Suit_card3']] = pd.DataFrame(rotterdam_data.Suit_cards.to_list())\n#Checks whether all players have the suit of the first card played\nfor i in range(4):\n rotterdam_data['Player{}_has_suit'.format(i)] = rotterdam_data.apply(lambda x: 
player_has_suit(x['len_center'], x['Center'], x['HasColor{}'.format(i)]), axis = 1)\nprint('8')\n#Create a dummie to transform turns\nturn_dummie = pd.get_dummies(rotterdam_data['Player_turn']).rename(columns = {'0': 't1', '1': 't2', '2': 't3', '3': 't4'})\nasked_suit_dummie = pd.get_dummies(rotterdam_data['Asked_suit']).rename(columns = {'1' : '1_t', '2' : '1_nt'})\ncard2_dummie = pd.get_dummies(rotterdam_data['Suit_card2']).rename(columns = {'0' : '2_n', '1' : '2_s', '2' : '2_t'})\ncard3_dummie = pd.get_dummies(rotterdam_data['Suit_card3']).rename(columns = {'0' : '3_n', '1' : '3_s', '2' : '3_t'})\n\nrotterdam_data = pd.concat([rotterdam_data, asked_suit_dummie, turn_dummie, card2_dummie, card3_dummie], axis = 1)\n\nprint('9')\n\nfor i in range(1,4):\n rotterdam_data['Rank_card{}'.format(i)] = rotterdam_data.apply(lambda x: x['Center'][i-1].order(x['Troef']) if x['len_center'] > i-1 else 0, axis = 1)\n\n#Split the database for each card in hand\nrotterdam_data = rotterdam_data.explode('Cards').reset_index()\n\n#If center is empty, check for each card if it is the highest of the suit\nrotterdam_data['Has_highest_suit'] = rotterdam_data.apply(lambda x: check_has_card_empty_center(x['Cards'], x['Highest_cards'], 0) if np.isnan(x['Has_highest_suit']) else x['Has_highest_suit'], axis = 1)\nrotterdam_data['Has_second_highest_suit'] = rotterdam_data.apply(lambda x: check_has_card_empty_center(x['Cards'], x['Highest_cards'], 1) if np.isnan(x['Has_second_highest_suit']) else x['Has_second_highest_suit'], axis = 1)\n\nrotterdam_data['Still_in_game'] = rotterdam_data.apply(lambda x: 0 if x['Has_highest_suit'] == 1 else x['Still_in_game'], axis = 1)\n\n#Checks whether this card is the highest or second highest card\nrotterdam_data['Is_highest_suit'] = rotterdam_data.apply(lambda x: check_is_card(x['Cards'], x['Highest_cards'], 0) if x['Has_highest_suit'] == 1 else 0, axis = 1)\nrotterdam_data['Is_second_highest_suit'] = rotterdam_data.apply(lambda x: check_is_card(x['Cards'], x['Highest_cards'], 1) if x['Has_second_highest_suit'] == 1 else 0, axis = 1)\nprint('10')\n#Checks whether this card is higher than all cards in center\nrotterdam_data['Is_higher'] = rotterdam_data.apply(lambda x: int(x['Cards'] in x['Higher_cards']) if x['Has_higher'] == 1 else 0, axis = 1)\n\n#Checks whether this card is lowest or highest value\nrotterdam_data['Is_lowest_value'] = rotterdam_data.apply(lambda x: 1 if x['Cards'].points(x['Troef']) == x['Lowest_value'] else 0, axis = 1)\nrotterdam_data['Is_highest_value'] = rotterdam_data.apply(lambda x: 1 if x['Cards'].points(x['Troef']) == x['Highest_value'] else 0, axis = 1)\nprint('11')\n#Checks whether this card creates a street\nrotterdam_data['Creates_street'] = rotterdam_data.apply(lambda x: 1 if x['Cards'] in x['Cards_to_create_street'] else 0, axis = 1)\n\n#Gets the suit of the card. 2 if card is trump, 1 if card is asked suit, 0 otherwise\nrotterdam_data['Player_suit'] = rotterdam_data.apply(lambda x: get_player_suit(x['Center'], x['Cards'], x['Troef']), axis = 1)\n\n#Check whether the current card is the played card. 
This is the target\nrotterdam_data['Is_played_card'] = rotterdam_data.apply(lambda x: 1 if x['Cards'].suit + str(x['Cards'].value) == x['PlayCard'] else 0, axis = 1)\nprint('12')\n#Check if the center is empty, whether opponents have the suit of the current card\nfor i in range(4):\n rotterdam_data['Player{}_has_suit'.format(i)] = rotterdam_data.apply(lambda x: player_has_suit_no_center(x['Cards'], x['HasColor{}'.format(i)]) if np.isnan(x['Player{}_has_suit'.format(i)]) else x['Player{}_has_suit'.format(i)], axis = 1)\n\nrotterdam_data['rank_player_card'] = rotterdam_data.apply(lambda x: x['Cards'].order(x['Troef']), axis = 1)\n \nplayer_suit_dummie = pd.get_dummies(rotterdam_data['Player_suit']).rename(columns = {'0': 'p_none', '1' : 'p_s', '2' : 'p_t'})\n\nrotterdam_data = pd.concat([rotterdam_data, player_suit_dummie], axis = 1)\nprint('13')\n#Transform the cards back from classes to normal cards. For debugging purposes\nrotterdam_data['Cards'] = rotterdam_data.apply(lambda x: transform_back(x['Cards']), axis = 1)\nrotterdam_data['Center'] = rotterdam_data.apply(lambda x: transform_back(x['Center']), axis = 1)\nfor i in range(4):\n rotterdam_data['CardsPlayed{}'.format(i)] = rotterdam_data.apply(lambda x: transform_back(x['CardsPlayed{}'.format(i)]), axis = 1)\nprint('14')\n\n#Create set of features and target\nattributes = rotterdam_data[['rank_player_card', 'Rank_card1', 'Rank_card2', 'Rank_card3', 'Has_trump', '2_n', '2_s', '2_t', '3_n', '3_s', \n'3_t', 'Has_suit', '1_t', '1_nt', 'Still_in_game', 'Is_second_highest_suit', 'Is_highest_suit', \n'Has_second_highest_suit', 'Has_highest_suit', 'Player1_has_suit', 'Player2_has_suit', 'Player3_has_suit', 'Player1_has_trump', \n'Player2_has_trump','Player3_has_trump', 'Is_higher', 'Has_higher', 'No_playable_cards', 'Is_lowest_value', 'Is_highest_value', \n'Teammate_winning', 'Teammate_played', 'Can_create_street', 'Creates_street', 'p_none', 'p_s', 'p_t', 't1', 't2', \n't3', 't4']]\ntarget = rotterdam_data['Is_played_card']\n\nmean_percentages = [0 for i in range(8)]\n\nattributes = rotterdam_data[['rank_player_card', 'Rank_card1', 'Rank_card2', 'Rank_card3', 'Has_trump', '2_n', '2_s', '2_t', '3_n', '3_s', '3_t', 'Has_suit', '1_t', '1_nt', 'Still_in_game', 'Is_second_highest_suit', 'Is_highest_suit', \n'Has_second_highest_suit', 'Has_highest_suit', 'Player1_has_suit', 'Player2_has_suit', 'Player3_has_suit', 'Player1_has_trump', \n'Player2_has_trump','Player3_has_trump', 'Is_higher', 'Has_higher', 'No_playable_cards', 'Is_lowest_value', 'Is_highest_value', \n'Teammate_winning', 'Teammate_played', 'Can_create_street', 'Creates_street', 'p_none', 'p_s', 'p_t', 'first_turn', 'second_turn', \n'third_turn', 'last_turn']]\ntarget = rotterdam_data['Is_played_card']\n\nmean_percentages = [0 for i in range(8)]\n\n\nprint(i)\n#Split the dataset into \nattributes_train, attributes_test, target_train, target_test = train_test_split(attributes, target, test_size=0.25)\n\n#Train a classifier on the dataset\nrf = RandomForestClassifier()\nrf = rf.fit(attributes_train, target_train)\npredictions = rf.predict_proba(attributes)\n\npredictions = list(map(get_second_element, predictions))\n\nrotterdam_data['Prediction'] = predictions\n\nalready_checked = [False for i in rotterdam_data.index]\n\npercentages = {}\nfor i in range(1,9):\n percentages['playable_cards{}'.format(i)] = 0\n percentages['playable_cards{}_good'.format(i)] = 0\n\nrotterdam_data['Chosen_card'] = np.nan\n\nfor index in rotterdam_data.index:\n checked = already_checked[index]\n\n if checked:\n 
continue\n\n no_cards = rotterdam_data['No_playable_cards'][index]\n\n #Get the card with the highest probability of being played for each hand\n highest_prediction = 0\n for i in range(no_cards):\n already_checked[index + i] = True\n prediction = rotterdam_data['Prediction'][index + i]\n if prediction > highest_prediction:\n highest_prediction = prediction\n best_card = rotterdam_data['Cards'][index + i][0]\n rotterdam_data.at[index, 'Chosen_card'] = best_card\n percentages['playable_cards{}'.format(no_cards)] += 1\n\n if rotterdam_data['PlayCard'][index] == rotterdam_data['Chosen_card'][index]:\n percentages['playable_cards{}_good'.format(no_cards)] += 1\n\nno_cards = 0\nno_good_cards = 0\nfor i in range(1,9):\n percentage = (percentages['playable_cards{}_good'.format(i)]/percentages['playable_cards{}'.format(i)]) * 100\n mean_percentages[i- 1] += percentage\n no_cards += percentages['playable_cards{}'.format(i)]\n no_good_cards += percentages['playable_cards{}_good'.format(i)]\n\nfor i in range(8):\n mean_percentages[i] = mean_percentages[i]/1\n print('Aantal keer dezelfde kaart bij {} kaarten: %.2f'.format(i + 1) % mean_percentages[i], '%')\n\njoblib.dump(rf, \"./played_card_prediction.joblib\")\nprint(time.time() - start_time)\n\nprint(no_good_cards/no_cards)\n","repo_name":"lennardhordijk/Klaverjas","sub_path":"Played_card_prediction.py","file_name":"Played_card_prediction.py","file_ext":"py","file_size_in_byte":22626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13482227518","text":"class Test:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def first(self, x, y):\n z = x + y\n return z\n\n def second(self, b, x, y):\n a = self.first(x, y)\n c = a + b\n return c\n\n def third(self, b):\n a = self.first(self.x, self.y)\n c = a + b\n return c\n\n\nif __name__ == '__main__':\n test = Test(1, 2)\n # student = Student()\n\n x = test.second(4, 3, 2)\n print(x)\n\n y = test.third(3)\n print(y)\n\n\n\n\n","repo_name":"Zapel/asynchronous_python","sub_path":"class/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35156647035","text":"import glob\nimport sep\nimport numpy as np\nfrom astropy.wcs import WCS\nimport astropy.io.fits as pyfits\nfrom matplotlib import pyplot as plt\nfrom scipy.ndimage import interpolation as interp\n\n#real dirty way to find offsets to stack\ndef find_offset(ref_x, ref_y, _data):\n #Get the objects in our current image\n _data = _data.byteswap().newbyteorder()\n _data_bkg = sep.Background(_data)\n data_objs = sep.extract(_data-_data_bkg, thresh=10.0, err=_data_bkg.globalrms, minarea=20)\n \n #loop over them and calculate their distances\n for i,j in enumerate(data_objs['x']):\n shift_x = ref_x - data_objs['x'][i]\n shift_y = ref_y - data_objs['y'][i]\n distance = np.sqrt((shift_x)**2+(shift_y)**2)\n \n # if the distance is less than this threshhold, its a match\n # return the offset x,y\n if distance < 10:\n return shift_x, shift_y\n break\n return None, None\n\nclass astroRGB():\n def __init__(self, filters=['Harris-R', 'Harris-V', 'Bessell-U'], fits_files=None, savefig=False, pltname=None):\n self.filters=filters\n self.fits_files = fits_files\n self.grouped_fits_files = {}\n self.savefig = savefig\n self.pltname = pltname\n\n #if no fits files are passed in\n # grab the ones in the current directory\n if self.fits_files is None:\n self.fits_files = files = glob.glob('*fits')\n if not len(self.fits_files):\n assert False, 'No fits files found to create RBG image'\n\n if not len(self.filters):\n assert False, \"No filters...?\"\n\n #create the grouped_fits_files object with the passed in filters as a key\n for _filter in self.filters:\n self.grouped_fits_files[_filter] = []\n\n available_filters = []\n #create the grouped fits dictionary based off filter\n for f in [x for x in self.fits_files if 'samastro_stack' not in x]:\n _filter = pyfits.getheader(f)['FILTER']\n available_filters.append(_filter)\n if _filter in self.filters:\n self.grouped_fits_files[_filter].append(f)\n\n available_filters = list(set(available_filters))\n\n #if no fits files were found for a requested filter, alert\n for _filter in self.filters:\n if not len(self.grouped_fits_files[_filter]):\n print()\n assert False, 'No fits files corresponding to filter: {}.. 
Available filters in fits_files list: {}'.format(_filter, available_filters)\n\n if self.savefig and not self.pltname:\n self.pltname = 'ImaNinnyNoPlotName.png' \n\n\n def stackimages(self, filters=[], offset_thresh=5, skipifexist=True):\n if filters is None or not len(filters):\n filters = self.filters\n\n ref_file = self.grouped_fits_files[list(self.grouped_fits_files.keys())[0]][0]\n ref_img = pyfits.getdata(ref_file)\n ref_img = ref_img.byteswap().newbyteorder()\n ref_bkg = sep.Background(ref_img)\n ref_objects = sep.extract(ref_img-ref_bkg, thresh=10.0, err=ref_bkg.globalrms, minarea=20)\n\n image_size = np.shape(ref_img)\n size = list(image_size)[0]\n\n for band in filters:\n print('Stacking all images in filter: {}'.format(band))\n outfname = 'samastro_stack_{}.fits'.format(band)\n allfiles = glob.glob('*.fits')\n \n if outfname not in allfiles or not skipifexist:\n _shift_data = np.zeros(image_size)\n \n #loop over the files associated with each band\n for file in self.grouped_fits_files[band]:\n \n #get the image data\n tmp_data = pyfits.getdata(file)\n \n #our lists to hold the offsets\n sx, sy = [], []\n \n #loop over each reference object\n for i,j in enumerate(ref_objects['x']):\n #find and append the offsets to our lists\n tmp_sx, tmp_sy = find_offset(ref_objects['x'][i], ref_objects['y'][i], tmp_data)\n if tmp_sx != None:\n sx.append(tmp_sx)\n sy.append(tmp_sy)\n\n #calculate the average offset\n if len(sx) and len(sy):\n shift_x, shift_y = np.mean(sx), np.mean(sy)\n else:\n shift_x, shift_y = 0, 0\n\n print('Average offsets in x: {}, y: {}\\n'.format(round(shift_x, 3), round(shift_y, 3)))\n if shift_x != 0 and shift_y != 0 and abs(shift_x) < offset_thresh and abs(shift_y) < offset_thresh:\n #scipy method that shifts the image based off these offsets\n new_data = interp.shift(tmp_data, [shift_y, shift_x])\n #new coadded data from the shift\n _shift_data += new_data\n try:\n stacked_average_data = _shift_data/float(len(self.grouped_fits_files[band]))\n hdu = pyfits.PrimaryHDU(stacked_average_data)\n hdu.writeto(outfname, overwrite=True)\n except:\n #this should only happen if no images were able to be stacked for a given filter\n print('Fits images for filter {} could not be stacked. 
Make sure they are all of the same field'.format(band))\n else:\n print('Fits images for filter {} have already been stacked: {}'.format(band, outfname))\n \n\n def plot(self, rgb_scale=np.array([0.23, 0.22, 0.28])*1, quad=0.9, show=True, savefig=True):\n shift_band_image = {}\n ref_file = self.grouped_fits_files[list(self.grouped_fits_files.keys())[0]][0]\n ref_img = pyfits.getdata(ref_file)\n ref_img = ref_img.byteswap().newbyteorder()\n\n image_size = np.shape(ref_img)\n size = list(image_size)[0]\n simpleRGB=np.zeros((size,size,3),dtype=float)\n allfiles = glob.glob('*.fits')\n\n for band in self.filters:\n stacked_file = 'samastro_stack_{}.fits'.format(band)\n if stacked_file not in allfiles:\n self.stackimages(filters=[band])\n \n stacked_data = pyfits.getdata(stacked_file)\n\n shift_band_image.update({band: stacked_data})\n\n for i in range(len(self.filters)):\n data = shift_band_image[self.filters[i]].copy()\n min_value = np.quantile(data, [0.02, 1-0.02])[0]\n max_value = np.quantile(data, [0.02, 1-0.02])[1]\n data = (data - min_value)/(max_value-min_value)\n simpleRGB[:,:,i]=(data*rgb_scale[i])**quad\n \n ax = plt.subplot()\n ax.tick_params(which = 'both', size = 0, labelsize = 0)\n ax.imshow(simpleRGB, origin='lower')\n\n if savefig:\n plt.savefig(self.pltname, format='png', dpi=1000)\n\n if show:\n plt.show()\n\nimg = astroRGB(filters=['Harris-R', 'Harris-V', 'Bessell-U'], savefig=True)\nimg.stackimages(skipifexist=True)\nimg.plot(show=True, rgb_scale=np.array([0.23, 0.22, 0.28])*1, quad=0.9)","repo_name":"swyatt7/astroRGB","sub_path":"samastro_colorimage.py","file_name":"samastro_colorimage.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73719371367","text":"from lino.api import dd, _\n\n\nclass TranslatorTypes(dd.ChoiceList):\n\n \"\"\"\n Types of registries for the Belgian residence.\n \n \"\"\"\n verbose_name = _(\"Translator type\")\n\nadd = TranslatorTypes.add_item\nadd('10', _(\"SETIS\"))\nadd('20', _(\"Other\"))\nadd('30', _(\"Private\"))\n\n\n\nclass StartingReasons(dd.ChoiceList):\n\n verbose_name = _(\"Starting reason\")\n\nadd = StartingReasons.add_item\nadd('100', _(\"Voluntarily\"))\nadd('200', _(\"Mandatory\"))\n\nclass OldEndingReasons(dd.ChoiceList):\n\n verbose_name = _(\"Old Ending reason\")\n\nadd = OldEndingReasons.add_item\nadd('100', _(\"Successfully ended\"))\nadd('200', _(\"Health problems\"))\nadd('300', _(\"Familiar reasons\"))\nadd('400', _(\"Missing motivation\"))\nadd('500', _(\"Return to home country\"))\nadd('900', _(\"Other\"))\n\n\nclass ProfessionalStates(dd.ChoiceList):\n\n verbose_name = _(\"Professional situation\")\n\nadd = ProfessionalStates.add_item\nadd('100', _(\"Student\"))\nadd('200', _(\"Workless\"))\nadd('300', _(\"Seeking\"))\nadd('400', _(\"Employed\"))\nadd('500', _(\"Independent\"))\nadd('600', _(\"Retired\")) # pensioniert\nadd('700', _(\"Unemployable\")) # arbeitsunfähig\n\n\n# class ClientStates(dd.Workflow):\n# verbose_name_plural = _(\"Client states\")\n# default_value = 'newcomer'\n \n\n# add = ClientStates.add_item\n# add('05', _(\"Reception\"), 'reception')\n# add('10', _(\"Newcomer\"), 'newcomer') # \"first contact\" in Avanti\n# add('20', _(\"Refused\"), 'refused')\n# add('30', _(\"Coached\"), 'coached')\n# add('50', _(\"Former\"), 'former')\n","repo_name":"lino-framework/avanti","sub_path":"lino_avanti/lib/avanti/choicelists.py","file_name":"choicelists.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39728342320","text":"#!/usr/bin/env python3\n#\n#euler111.py / primes with runs\nfrom math import sqrt\nimport time\n\n# Debut du decompte du temps\nstart_time = time.time()\n\ndef isprime(x):\n for i in range(2,int(sqrt(x))+1):\n if x%i==0:\n return False\n return True\n\nliste_chiffres = [i for i in range(10)]\nliste_chiffres_sans_zero = [i for i in range(1,10)]\nliste_impair = [2*i+1 for i in range(5)]\n\ndef calcul(n,d):\n nb_premiers=0\n liste_premiers=[]\n if d == 0 : ## cas d = 0 : tests x00...00x\n for i in liste_chiffres_sans_zero:\n for j in liste_impair:\n my_string=str(i)+'0'*(n-2)+str(j)\n if isprime(int(my_string)) == True:\n liste_premiers.append(int(my_string))\n nb_premiers = n-2\n elif d%2 == 0: ## cas d = pair : tests dd...ddx\n for i in liste_impair:\n if isprime(int(str(d)*(n-1)+str(i))) == True: ## type 1113/1115/1117/1119\n liste_premiers.append(int(str(d)*(n-1)+str(i)))\n nb_premiers = n-1\n if nb_premiers == 0: ##pas de nb premier de type dd...ddx\n nb_premiers = n-2\n for shift in range(n-1):\n for impair in liste_impair:\n liste_test = [int(str(d) * shift + str(i) + str(d) * (n - shift - 2) + str(impair)) for i in range(10)]\n for test in liste_test:\n if isprime(test) == True and len(str(test)) == n:\n liste_premiers.append(test)\n else : ## cas d = impair : tests dd...ddx\n for i in liste_impair:\n if isprime(int(str(d)*(n-1)+str(i))) == True: ## type 1113/1115/1117/1119\n liste_premiers.append(int(str(d)*(n-1)+str(i)))\n nb_premiers = n-1\n for shift in range(n-1):\n liste_test = [int(str(d)*shift + str(i) + str(d) * (n-shift-1)) for i in range(10)] #type 1011/1111/1211/1311...\n for test in liste_test:\n if isprime(test) == True :\n liste_premiers.append(test)\n nb_premiers = n - 1\n return nb_premiers,liste_premiers\n\n#main loop\nn=10\ntotal_sum=0\nfor d in range(10):\n M,liste = calcul(n,d)[0],calcul(n,d)[1]\n N=len(liste)\n S=0\n for i in liste:\n S+=i\n print(d,M,N,S,liste)\n total_sum+=S\nprint(total_sum)\n\n\n\n# Affichage du temps d execution\nprint(\"Temps d execution : %s secondes ---\" % (time.time() - start_time))\n\n","repo_name":"allagonne/Euler_project","sub_path":"euler111.py","file_name":"euler111.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28678650839","text":"# Modules\nimport os\nimport csv\n\n# Set path for budget file\nbudget_file_path = os.path.join(\"..\", \"Resources\", \"budget_data.csv\")\n\nmonths = []\t\t\t\t# months present in the dataset\nrevenue = []\t\t\t# profit/loss values in the dataset\nrevenue_change = [] \t# difference in the profit/loss between each month\n\t\ntotal_months = 0\t\t# total number of months in the dataset\ntotal_profit_loss = 0 \t# overall totoal amount of profit_loss \naverage_change = 0\t\t# overall average change \nmax_profit = 0\t\t\t# maximum dollar amount profit in the entire period\nmax_loss = 0\t\t\t# maximum dollar amount loss in the entire period\n\n# Open the CSV\nwith open(budget_file_path) as budget_file:\n\tbudget_reader = csv.reader(budget_file, delimiter=\",\")\t\n\t#skip the header\n\tbudget_header = next(budget_reader)\n\t\n\t#loop through the file to get to the solution\n\tfor row in budget_reader:\n\t\t#grab all the months into months list\n\t\tmonths.append(row[0])\n\n\t\t#grab all the profit/loss values into revenue list and cast it to int.\n\t\trevenue.append(int(row[1]))\n\t\t\n# The total number of months included in the dataset\ntotal_months = len(months)\n\n# The net total amount of \"Profit/Losses\" over the entire period\ntotal_profit_loss = sum(revenue)\n\nfor i in range(len(revenue) - 1):\n\tprofit_loss = revenue[i+1] - revenue[i]\n\trevenue_change.append(profit_loss)\n\n# The average of the changes in \"Profit/Losses\" over the entire period\naverage_change = round(sum(revenue_change)/len(revenue_change),2)\n\n# The greatest increase in profits (date and amount) over the entire period\nmax_profit = max(revenue_change)\nmax_profit_month = months[revenue_change.index(max_profit)+1]\n\n# The greatest decrease in losses (date and amount) over the entire period\nmax_loss = min(revenue_change)\nmax_loss_month = months[revenue_change.index(max_loss)+1]\t\n\n#Print Statements\n\nprint('Financial Analysis')\nprint('------------------------------------------------'+'\\n')\nprint(\"Total Months: \" + str(total_months))\nprint(\"Total: $ \" + str(total_profit_loss)) \nprint(\"Average Change: $ \" + str(average_change))\nprint(f\"Greatest Increase in Profits: {max_profit_month} (${max_profit})\")\nprint(f\"Greatest Decrease in Profits: {max_loss_month} (${max_loss})\")\t\n\nwith open(\"financial_analysis.txt\", 'w') as text:\n\ttext.write(\"Financial Analysis\"+ \"\\n\")\n\ttext.write(\"--------------------------------------------------\\n\\n\")\n\ttext.write(\"Total Months: \" + str(total_months) + \"\\n\")\n\ttext.write(\"Total: $\" + str(total_profit_loss) + \"\\n\")\n\ttext.write(\"Average Change: $\" + str(average_change) + \"\\n\")\n\ttext.write(f\"Greatest Increase in Profits: {max_profit_month} (${max_profit}) \\n\")\n\ttext.write(f\"Greatest Decrease in Profits: {max_loss_month} (${max_loss})\")\t","repo_name":"samatha876/python-challenge","sub_path":"PyBank/Reolved/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32596170426","text":"def fizz_buzz(numb):\n if numb % 3 == 0 and numb % 5 == 0:\n return 'FizzBuzz'\n elif numb % 3 == 0:\n return 'Fizz'\n elif numb % 5 == 0:\n return 'Buzz'\n else:\n return numb\n\nn, b = input('Input range').split()\nwhile True:\n try:\n n = int(n)\n b = int(b)\n except ValueError:\n print('No letters')\n if n >= b:\n print('Again')\n else:\n break\n n, b = input(\"Input range\").split()\n n = int(n)\n b = int(b)\nb = b + 1\nfor numb in range(n, b):\n print(fizz_buzz(numb))","repo_name":"dakcope/fuzzbiz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4583123796","text":"import networkx as nx\nfrom constant import DEPOSITION_RATIO\nimport os\nimport json\n\nclass Input(object):\n def __init__(self, type = 'json'):\n '''\n Creating the input class\n :param type: the type of input, currently defaults to JSON\n '''\n self.type = type\n self.directory = os.path.dirname(os.path.abspath(__file__))\n\n def setFileName(self, filename):\n '''\n Setting the filename for the input if the file exists, otherwise setting to None.\n :param filename: the filename of the input file\n '''\n data_location = os.path.join(self.directory, 'data')\n\n # Data location exists\n if os.path.exists(data_location):\n file_location = os.path.join(data_location, filename)\n\n # File location exists\n if os.path.exists(file_location):\n self.file_location = file_location\n else:\n self.file_location = None\n\n def readFile(self, filename):\n '''\n Reading from the file, if the file actually exists\n :param filename: the filename for the input\n '''\n self.setFileName(filename)\n\n # Initialize data to None\n self.data = None\n\n # If the file exists\n if self.file_location:\n with open(self.file_location) as data_file:\n data = json.load(data_file)\n\n self.data = data\n\n\n\n def parseGraph(self):\n '''\n This method would read the input from a file.\n The input would be in the form of (x,y) for every node\n '''\n\n nodes = {}\n edges = []\n\n for index in range(2):\n nodes['A'+str(index)] = {\n 'x': 0,\n 'y': 0\n }\n nodes['B'+str(index)] = {\n 'x': 0,\n 'y': 2\n }\n nodes['C'+str(index)] = {\n 'x': 2,\n 'y': 2\n }\n\n edges.append({\n 'start': 'A'+str(index),\n 'end': 'B'+str(index)\n })\n edges.append({\n 'start': 'B'+str(index),\n 'end': 'C'+str(index)\n })\n\n for node in ['A','B','C']:\n edges.append({\n 'start': node + '0',\n 'end': node + '1'\n })\n\n self.edges = edges\n self.nodes = nodes\n\n\n def createGraph(self):\n graph = nx.Graph()\n\n for label, node in self.nodes.iteritems():\n graph.add_node(label)\n\n for edge in self.edges:\n if edge['start'][:1] != edge['end'][:1]:\n graph.add_edge(edge['start'], edge['end'], weight = DEPOSITION_RATIO)\n else:\n graph.add_edge(edge['start'], edge['end'], weight = 0)\n\n self.graph = graph\n\nif __name__ == '__main__':\n input = Input()\n input.readFile('input.json')\n input.createBasicGraph()\n pass","repo_name":"seanxyuan/TSP_Robot_Path_Planning","sub_path":"ArtificialNodes/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"43443493254","text":"\"\"\"Discretization methods\"\"\"\nfrom copy import deepcopy\n\nimport numpy as np\nfrom caimcaim import CAIMD\n\n\ndef discretize_data(data, method, **kwargs):\n data_cpy = deepcopy(data)\n\n _, nb_cols = data_cpy.shape\n\n if method is caim_binning:\n x_discr = caim_binning(data, **kwargs)\n for idx in range(nb_cols):\n data_cpy[:, idx] = x_discr[:, idx]\n else:\n for idx in range(nb_cols):\n data_cpy[:, idx] = method(data_cpy[:, idx], **kwargs)\n\n return data_cpy\n\n\ndef equal_width(attr_data, nb_bins):\n attr_data = attr_data.astype('float64')\n _, bins = np.histogram(attr_data, bins=nb_bins)\n return np.fmin(np.digitize(attr_data, bins), nb_bins)\n\n\ndef equal_freq(attr_data, nb_bins):\n \"\"\"Inspired by:\n https://stackoverflow.com/questions/39418380/histogram-with-equal-number-of-points-in-each-bin\n \"\"\"\n attr_data = attr_data.astype('float64')\n\n nb_data = len(attr_data)\n bins = np.interp(np.linspace(0, nb_data, nb_bins + 1),\n np.arange(nb_data),\n np.sort(attr_data))\n return np.fmin(np.digitize(attr_data, bins), nb_bins)\n\n\ndef caim_binning(x, y):\n \"\"\"https://github.com/airysen/caimcaim\"\"\"\n caim = CAIMD()\n x_discr = caim.fit_transform(x, y)\n return x_discr\n\n\ndef calculate_freedman_diaconis(x):\n q75, q25 = np.percentile(x, [75, 25])\n iqr = q75 - q25\n nb_bins = int((2 * iqr) / np.cbrt(x.shape[0]))\n #print('Freedman–Diaconis:', nb_bins)\n return nb_bins\n\n\ndef discretize_data_wrapper(discr_method, x, y):\n if discr_method in (equal_freq, equal_width):\n kwargs = dict(nb_bins=calculate_freedman_diaconis(x))\n else:\n kwargs = dict(y=y)\n x = discretize_data(x, discr_method, **kwargs)\n return x\n","repo_name":"pbielak/DAN-IMAD","sub_path":"laboratory/bayes_classifier/bc/discretization.py","file_name":"discretization.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17083709764","text":"#init a base\n\nimport numpy as np\nfrom parm import Para as P\nimport math\n\ndef init3D(P):\n patsize = P.patsize\n r = P.r\n Dmat = np.random.rand(patsize**3,r)*2-1\n Dm = np.sqrt(np.sum(Dmat*Dmat,axis = 0)) # a 1*30 array\n szi =np.shape(Dm)\n for i in range(szi[0]):\n Dmat[:,i] =Dmat[:,i]/Dm[i]\n\n D = np.transpose(np.reshape(Dmat,[patsize**2,patsize,r],order = 'F'),[0,2,1])\n return D\n\nif __name__ == '__main__':\n print(init3D(P).shape)\n\n\n","repo_name":"XiaoYangLiu-FinRL/Tensor_Sparse_Coding","sub_path":"python sparse coding/initbase.py","file_name":"initbase.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"38125524712","text":"import cv2 as cv\r\nimport numpy as np\r\n# 26.開閉操作\r\n\r\n\r\ndef open_demo(image):\r\n print(image.shape)\r\n gray = cv. cvtColor(image, cv.COLOR_BGR2GRAY)\r\n ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU) \r\n cv.imshow(\"binary\", binary)\r\n kernel = cv.getStructuringElement(cv.MORPH_RECT, (35, 1)) \r\n # morphology 形態學 MORPH_OPEN 開操作\r\n binary_open = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel) \r\n cv.imshow(\"open-result\", binary_open)\r\n\r\n\r\ndef close_demo(image):\r\n print(image.shape)\r\n gray = cv. cvtColor(image, cv.COLOR_BGR2GRAY)\r\n ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU) \r\n cv.imshow(\"binary\", binary)\r\n kernel = cv.getStructuringElement(cv.MORPH_RECT, (15, 15))\r\n # morphology 形態學 MORPH_CLOSE 閉操作\r\n binary_close = cv.morphologyEx(binary, cv.MORPH_CLOSE, kernel) \r\n cv.imshow(\"close-result\", binary_close)\r\n\r\n\r\n\r\nprint(\"-------hello python--------\")\r\nsrc = cv.imread(\"F:/026.jpg\") \r\ncv.namedWindow(\"input image\", cv.WINDOW_AUTOSIZE)\r\ncv.imshow(\"image\", src)\r\n\r\nopen_demo(src)\r\n# close_demo(src)\r\n\r\ncv.waitKey(0)\r\n\r\ncv.destoryAllWindows()\r\n","repo_name":"HJHJKOKO/learning_to_OpenCV","sub_path":"26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1111910091","text":"#!/usr/bin/env python3\n\nimport sys\nimport subprocess\n\nfrom .kast import *\nfrom .kast import _notif, _warning, _fatal\n\ndef buildAssoc(unit, join, ls):\n \"\"\"Build an associative binary operator term given the join and unit ops.\n\n - Input: unit, join, and list of elements to join.\n - Output: cons-list style construction of the joined term.\n \"\"\"\n ls = list(filter(lambda l: l != unit, ls))\n if len(ls) == 0:\n return unit\n if len(ls) == 1:\n return ls[0]\n if ls[0] == unit:\n return buildAssoc(unit, join, ls[1:])\n return KApply(join, [ls[0], buildAssoc(unit, join, ls[1:])])\n\ndef buildCons(unit, cons, ls):\n \"\"\"Build a cons operator term given the cons and unit ops.\n\n - Input: unit, cons, and list of elements to join.\n - Output: cons-list style construction of the joined term.\n \"\"\"\n if len(ls) == 0:\n return unit\n return KApply(cons, [ls[0], buildCons(unit, cons, ls[1:])])\n\ndef match(pattern, kast):\n \"\"\"Perform syntactic pattern matching and return the substitution.\n\n - Input: a pattern and a kast term.\n - Output: substitution instantiating the pattern to the kast term.\n \"\"\"\n subst = {}\n if isKVariable(pattern):\n return { pattern[\"name\"] : kast }\n if isKToken(pattern) and isKToken(kast):\n return {} if pattern[\"token\"] == kast[\"token\"] else None\n if isKApply(pattern) and isKApply(kast) \\\n and pattern[\"label\"] == kast[\"label\"] and len(pattern[\"args\"]) == len(kast[\"args\"]):\n for (patternArg, kastArg) in zip(pattern[\"args\"], kast[\"args\"]):\n argSubst = match(patternArg, kastArg)\n subst = combineDicts(subst, argSubst)\n if subst is None:\n return None\n return subst\n if isKRewrite(pattern) and isKRewrite(kast):\n lhsSubst = match(pattern['lhs'], kast['lhs'])\n rhsSubst = match(pattern['rhs'], kast['rhs'])\n return combineDicts(lhsSubst, rhsSubst)\n if isKSequence(pattern) and isKSequence(kast) and len(pattern['items']) == len(kast['items']):\n for (patternItem, substItem) in zip(pattern['items'], kast['items']):\n itemSubst = match(patternItem, substItem)\n subst = combineDicts(subst, itemSubst)\n if subst is None:\n return None\n return subst\n return None\n\ndef onChildren(kast, effect):\n if isKApply(kast):\n return KApply(kast['label'], [ effect(arg) for arg in kast['args'] ])\n elif isKRewrite(kast):\n return KRewrite(effect(kast['lhs']), effect(kast['rhs']))\n elif isKSequence(kast):\n return KSequence([ effect(item) for item in kast['items'] ])\n return kast\n\ndef traverseBottomUp(kast, effect):\n return effect(onChildren(kast, lambda _kast: traverseBottomUp(_kast, effect)))\n\ndef traverseTopDown(kast, effect):\n return onChildren(effect(kast), lambda _kast: traverseTopDown(_kast, effect))\n\ndef collectBottomUp(kast, callback):\n callback(onChildren(kast, lambda _kast: collectBottomUp(_kast, callback)))\n\ndef substitute(pattern, substitution):\n \"\"\"Apply a substitution to a pattern.\n\n - Input: a pattern with free variables and a substitution.\n - Output: the pattern with the substitution applied.\n \"\"\"\n def replace(k):\n if isKVariable(k) and k[\"name\"] in substitution:\n return substitution[k[\"name\"]]\n return k\n return traverseBottomUp(pattern, replace)\n\ndef whereMatchingBottomUp(effect, matchPattern, pattern):\n def _effect(k):\n matchingSubst = match(matchPattern, k)\n newK = k\n if matchingSubst is not None:\n newK = effect(matchingSubst)\n return newK\n return traverseBottomUp(_effect, pattern)\n\ndef replaceKLabels(pattern, klabelMap):\n def replace(k):\n if 
isKApply(k) and k[\"label\"] in klabelMap:\n return KApply(klabelMap[k[\"label\"]], k[\"args\"])\n return k\n return traverseBottomUp(pattern, replace)\n\ndef rewriteWith(rule, pattern):\n \"\"\"Rewrite a given pattern at the top with the supplied rule.\n\n - Input: A rule to rewrite with and a pattern to rewrite.\n - Output: The pattern with the rewrite applied once at the top.\n \"\"\"\n (ruleLHS, ruleRHS) = rule\n matchingSubst = match(ruleLHS, pattern)\n if matchingSubst is not None:\n return substitute(ruleRHS, matchingSubst)\n return pattern\n\ndef rewriteAnywhereWith(rule, pattern):\n \"\"\"Attempt rewriting once at every position in an AST bottom-up.\n\n - Input: A rule to rewrite with, and a pattern to rewrite.\n - Output: The pattern with rewrites applied at every node once starting from the bottom.\n \"\"\"\n return traverseBottomUp(pattern, lambda p: rewriteWith(rule, p))\n\ndef replaceWith(rule, pattern):\n (ruleLHS, ruleRHS) = rule\n if ruleLHS == pattern:\n return ruleRHS\n return pattern\n\ndef replaceAnywhereWith(rule, pattern):\n return traverseBottomUp(pattern, lambda p: replaceWith(rule, p))\n\ndef unsafeMlPredToBool(k):\n \"\"\"Attempt to convert an ML Predicate back into a boolean expression.\n\n This is unsafe in general because not every ML Predicate can be represented correctly as a boolean expression.\n This function just makes a best-effort to do this.\n \"\"\"\n if k is None:\n return None\n mlPredToBoolRules = [ (KApply('#Top', []) , KToken('true', 'Bool'))\n , (KApply('#Bottom', []) , KToken('false', 'Bool'))\n , (KApply('#And' , [KVariable('#V1'), KVariable('#V2')]) , KApply('_andBool_' , [KVariable('#V1'), KVariable('#V2')]))\n , (KApply('#Or' , [KVariable('#V1'), KVariable('#V2')]) , KApply('_orBool_' , [KVariable('#V1'), KVariable('#V2')]))\n , (KApply('#Not' , [KVariable('#V1')]) , KApply('notBool_' , [KVariable('#V1')]))\n , (KApply('#Equals' , [KVariable('#V1'), KVariable('#V2')]) , KApply('_==K_' , [KVariable('#V1'), KVariable('#V2')]))\n ]\n newK = k\n for rule in mlPredToBoolRules:\n newK = rewriteAnywhereWith(rule, newK)\n return newK\n\ndef simplifyBool(k):\n if k is None:\n return None\n simplifyRules = [ (KApply('_==K_', [KVariable('#LHS'), KToken('true', 'Bool')]), KVariable('#LHS'))\n , (KApply('_==K_', [KToken('true', 'Bool'), KVariable('#RHS')]), KVariable('#RHS'))\n , (KApply('_==K_', [KVariable('#LHS'), KToken('false', 'Bool')]), KApply('notBool_', [KVariable('#LHS')]))\n , (KApply('_==K_', [KToken('false', 'Bool'), KVariable('#RHS')]), KApply('notBool_', [KVariable('#RHS')]))\n , (KApply('notBool_', [KToken('false' , 'Bool')]), KToken('true' , 'Bool'))\n , (KApply('notBool_', [KToken('true' , 'Bool')]), KToken('false' , 'Bool'))\n , (KApply('notBool_', [KApply('_==K_' , [KVariable('#V1'), KVariable('#V2')])]), KApply('_=/=K_' , [KVariable('#V1'), KVariable('#V2')]))\n , (KApply('notBool_', [KApply('_=/=K_' , [KVariable('#V1'), KVariable('#V2')])]), KApply('_==K_' , [KVariable('#V1'), KVariable('#V2')]))\n , (KApply('notBool_', [KApply('_==Int_' , [KVariable('#V1'), KVariable('#V2')])]), KApply('_=/=Int_' , [KVariable('#V1'), KVariable('#V2')]))\n , (KApply('notBool_', [KApply('_=/=Int_' , [KVariable('#V1'), KVariable('#V2')])]), KApply('_==Int_' , [KVariable('#V1'), KVariable('#V2')]))\n , (KApply('_andBool_', [KToken('true', 'Bool'), KVariable('#REST')]), KVariable('#REST'))\n , (KApply('_andBool_', [KVariable('#REST'), KToken('true', 'Bool')]), KVariable('#REST'))\n , (KApply('_andBool_', [KToken('false', 'Bool'), KVariable('#REST')]), 
KToken('false', 'Bool'))\n , (KApply('_andBool_', [KVariable('#REST'), KToken('false', 'Bool')]), KToken('false', 'Bool'))\n , (KApply('_orBool_', [KToken('false', 'Bool'), KVariable('#REST')]), KVariable('#REST'))\n , (KApply('_orBool_', [KVariable('#REST'), KToken('false', 'Bool')]), KVariable('#REST'))\n , (KApply('_orBool_', [KToken('true', 'Bool'), KVariable('#REST')]), KToken('true', 'Bool'))\n , (KApply('_orBool_', [KVariable('#REST'), KToken('true', 'Bool')]), KToken('true', 'Bool'))\n ]\n newK = k\n for rule in simplifyRules:\n newK = rewriteAnywhereWith(rule, newK)\n return newK\n\ndef getOccurances(kast, pattern):\n occurances = []\n def addOccurance(k):\n if match(pattern, k):\n occurances.append(k)\n collectBottomUp(kast, addOccurance)\n return occurances\n\ndef countVarOccurances(kast, numOccurances = None):\n \"\"\"Count the number of occurances of each variable in a proof.\n\n - Input: Kast term.\n - Output: Map of variable names to their number of occurances.\n \"\"\"\n numOccurances = {} if numOccurances is None else numOccurances\n def _getNumOccurances(_kast):\n if isKVariable(_kast):\n vName = _kast['name']\n if vName in numOccurances:\n numOccurances[vName] += 1\n else:\n numOccurances[vName] = 1\n collectBottomUp(kast, _getNumOccurances)\n return numOccurances\n\ndef collectFreeVars(kast):\n return list(countVarOccurances(kast).keys())\n\ndef flattenLabel(label, kast):\n \"\"\"Given a binary tree of a label, return all the leaves.\n\n - Input: label of binary tree, and kast term.\n - Output: list of leaves of binary tree (singleton list for no occurance of label at top).\n \"\"\"\n if isKApply(kast) and kast['label'] == label:\n items = [ flattenLabel(label, arg) for arg in kast['args'] ]\n return [ c for cs in items for c in cs ]\n return [kast]\n\ndef splitConfigAndConstraints(kast):\n \"\"\"Split the configuration/term from the constraints.\n\n - Input: kast conjunct representing a constrained term.\n - Output: tuple of term and constraint.\n \"\"\"\n conjuncts = flattenLabel('#And', kast)\n term = None\n constraints = []\n for c in conjuncts:\n if isKApply(c) and isCellKLabel(c['label']):\n term = c\n else:\n constraints.append(c)\n constraint = buildAssoc(KConstant('#Top'), '#And', constraints)\n return (term, constraint)\n\ndef findCommonItems(l1, l2):\n common = []\n for i in l1:\n if i in l2:\n common.append(i)\n newL1 = []\n newL2 = []\n for i in l1:\n if not i in common:\n newL1.append(i)\n for i in l2:\n if not i in common:\n newL2.append(i)\n return (common, newL1, newL2)\n\ndef propagateUpConstraints(k):\n \"\"\"Try to propagate common constraints up disjuncts.\n\n - Input: kast disjunct of constrained terms (conjuncts).\n - Output: kast where common constraints in the disjunct have been propagated up.\n \"\"\"\n def _propagateUpConstraints(_k):\n pattern = KApply('#Or', [KApply('#And', [KVariable('G1'), KVariable('C1')]), KApply('#And', [KVariable('G2'), KVariable('C2')])])\n pmatch = match(pattern, _k)\n if pmatch is None:\n return _k\n (common1, l1, r1) = findCommonItems(flattenLabel('#And', pmatch['C1']), flattenLabel('#And', pmatch['C2']))\n (common2, r2, l2) = findCommonItems(r1, l1)\n common = common1 + common2\n if len(common) == 0:\n return _k\n g1 = pmatch['G1']\n if len(l2) > 0:\n g1 = buildAssoc(KConstant('#Top'), '#And', [g1] + l2)\n g2 = pmatch['G2']\n if len(r2) > 0:\n g2 = buildAssoc(KConstant('#Top'), '#And', [g2] + r2)\n return KApply('#And', [KApply('#Or', [g1, g2]), buildAssoc(KConstant('#Top'), '#And', common)])\n return 
traverseBottomUp(k, _propagateUpConstraints)\n\ndef splitConfigFrom(configuration):\n \"\"\"Split the substitution from a given configuration.\n\n Given an input configuration `config`, will return a tuple `(symbolic_config, subst)`, where:\n\n 1. `config == substitute(symbolic_config, subst)`\n 2. `symbolic_config` is the same configuration structure, but where the contents of leaf cells is replaced with a fresh KVariable.\n 3. `subst` is the substitution for the generated KVariables back to the original configuration contents.\n \"\"\"\n initial_substitution = {}\n _mkCellVar = lambda label: label.replace('-', '_').replace('<', '').replace('>', '').upper() + '_CELL'\n def _replaceWithVar(k):\n if isKApply(k) and isCellKLabel(k['label']):\n if len(k['args']) == 1 and not (isKApply(k['args'][0]) and isCellKLabel(k['args'][0]['label'])):\n config_var = _mkCellVar(k['label'])\n initial_substitution[config_var] = k['args'][0]\n return KApply(k['label'], [KVariable(config_var)])\n return k\n symbolic_config = traverseTopDown(configuration, _replaceWithVar)\n return (symbolic_config, initial_substitution)\n\ndef collapseDots(kast):\n \"\"\"Given a configuration with structural frames `...`, minimize the structural frames needed.\n\n - Input: a configuration, potentially with structural frames.\n - Output: the same configuration, with the amount of structural framing minimized.\n \"\"\"\n def _collapseDots(_kast):\n if isKApply(_kast):\n label = _kast['label']\n args = _kast['args']\n if isCellKLabel(label) and len(args) == 1 and args[0] == ktokenDots:\n return ktokenDots\n newArgs = [ arg for arg in args if arg != ktokenDots ]\n if isCellKLabel(label) and len(newArgs) == 0:\n return ktokenDots\n if len(newArgs) < len(args):\n newArgs.append(ktokenDots)\n return KApply(label, newArgs)\n elif isKRewrite(_kast):\n if _kast['lhs'] == ktokenDots:\n return ktokenDots\n return _kast\n return traverseBottomUp(kast, _collapseDots)\n\ndef pushDownRewrites(kast):\n \"\"\"Traverse a term and push rewrites down as far as possible.\n\n - Input: Kast term potentially with rewrites.\n - Output: Kast term with rewrites localized (or removed) as much as possible.\n \"\"\"\n def _pushDownRewrites(_kast):\n if isKRewrite(_kast):\n lhs = _kast['lhs']\n rhs = _kast['rhs']\n if lhs == rhs:\n return lhs\n if isKVariable(lhs) and isKVariable(rhs) and lhs['name'] == rhs['name']:\n return lhs\n if isKApply(lhs) and isKApply(rhs) and lhs['label'] == rhs['label'] and len(lhs['args']) == len(rhs['args']):\n newArgs = [ KRewrite(lArg, rArg) for (lArg, rArg) in zip(lhs['args'], rhs['args']) ]\n return KApply(lhs['label'], newArgs)\n if isKSequence(lhs) and isKSequence(rhs) and len(lhs['items']) > 0 and len(rhs['items']) > 0:\n if lhs['items'][0] == rhs['items'][0]:\n lowerRewrite = KRewrite(KSequence(lhs['items'][1:]), KSequence(rhs['items'][1:]))\n return KSequence([lhs['items'][0], lowerRewrite])\n if lhs['items'][-1] == rhs['items'][-1]:\n lowerRewrite = KRewrite(KSequence(lhs['items'][0:-1]), KSequence(rhs['items'][0:-1]))\n return KSequence([lowerRewrite, lhs['items'][-1]])\n if isKSequence(lhs) and len(lhs['items']) > 0 and isKVariable(lhs['items'][-1]) and isKVariable(rhs) and lhs['items'][-1] == rhs:\n return KSequence([KRewrite(KSequence(lhs['items'][0:-1]), KConstant(klabelEmptyK)), rhs])\n return _kast\n return traverseTopDown(kast, _pushDownRewrites)\n\ndef inlineCellMaps(kast):\n \"\"\"Ensure that cell map collections are printed nicely, not as Maps.\"\n\n - Input: kast term.\n - Output: kast term with cell maps 
inlined.\n \"\"\"\n def _inlineCellMaps(_kast):\n if isKApply(_kast) and _kast[\"label\"].endswith('CellMapItem'):\n mapKey = _kast[\"args\"][0]\n if isKApply(mapKey) and isCellKLabel(mapKey[\"label\"]):\n return _kast[\"args\"][1]\n return _kast\n return traverseBottomUp(kast, _inlineCellMaps)\n\ndef removeSemanticCasts(kast):\n \"\"\"Remove injected `#SemanticCast*` nodes in AST.\n\n - Input: kast (possibly) containing automatically injected `#SemanticCast*` KApply nodes.\n - Output: kast without the `#SemanticCast*` nodes.\n \"\"\"\n def _removeSemanticCasts(_kast):\n if isKApply(_kast) and len(_kast['args']) == 1 and _kast['label'].startswith('#SemanticCast'):\n return _kast['args'][0]\n return _kast\n return traverseBottomUp(kast, _removeSemanticCasts)\n\ndef markUselessVars(kast):\n \"\"\"Given a kast term as input with variables, return one where the useless vars are appropriately marked.\n\n - Input: A Kast term.\n - Output: Kast term with variables appropriately named.\n \"\"\"\n occurances = countVarOccurances(kast)\n subst = {}\n for v in occurances:\n if v.startswith('_') and occurances[v] > 1:\n subst[v] = KVariable(v[1:])\n elif (not v.startswith('_')) and occurances[v] == 1:\n subst[v] = KVariable('_' + v)\n return substitute(kast, subst)\n\ndef uselessVarsToDots(kast, keepVars = None):\n \"\"\"Structurally abstract away useless variables.\n\n - Input: kast term, and a requires clause and ensures clause.\n - Output: kast term with the useless vars structurally abstracted.\n \"\"\"\n initList = {}\n if keepVars is not None:\n for v in keepVars:\n if v not in initList:\n initList[v] = 1\n else:\n initList[v] += 1\n numOccurances = countVarOccurances(kast, numOccurances = initList)\n\n def _collapseUselessVars(_kast):\n if isKApply(_kast) and isCellKLabel(_kast['label']):\n newArgs = []\n for arg in _kast['args']:\n if isKVariable(arg) and numOccurances[arg['name']] == 1:\n newArgs.append(ktokenDots)\n else:\n newArgs.append(arg)\n return KApply(_kast['label'], newArgs)\n return _kast\n\n return traverseBottomUp(kast, _collapseUselessVars)\n\ndef labelsToDots(kast, labels):\n \"\"\"Abstract specific labels for printing.\n\n - Input: kast term, and list of labels to abstract.\n - Output: kast term with those labels abstracted.\n \"\"\"\n def _labelstoDots(k):\n if isKApply(k) and isCellKLabel(k['label']) and k['label'] in labels:\n return ktokenDots\n return k\n return traverseBottomUp(kast, _labelstoDots)\n\ndef onAttributes(kast, effect):\n if isKAs(kast):\n return KAs(kast['pattern'], kast['alias'], att = effect(kast['att']))\n elif isKRule(kast):\n return KRule(kast['body'], requires = kast['requires'], ensures = kast['ensures'], att = effect(kast['att']))\n elif isKClaim(kast):\n return KClaim(kast['body'], requires = kast['requires'], ensures = kast['ensures'], att = effect(kast['att']))\n elif isKContext(kast):\n return KContext(kast['body'], requires = kast['requires'], att = effect(kast['att']))\n elif isKBubble(kast):\n return KBubble(kast['sentenceType'], kast['contents'], att = effect(kast['att']))\n elif isKProduction(kast):\n return KProduction(kast['productionItems'], kast['sort'], att = effect(kast['att']))\n elif isKSyntaxAssociativity(kast):\n return KSyntaxAssociativity(kast['assoc'], tags = kast['tags'], att = effect(kast['att']))\n elif isKSyntaxPriority(kast):\n return KSyntaxPriority(priorities = kast['priorities'], att = effect(kast['att']))\n elif isKSyntaxSort(kast):\n return KSyntaxSort(kast['sort'], att = effect(kast['att']))\n elif 
isKSortSynonym(kast):\n return KSortSynonym(kast['newSort'], kast['oldSort'], att = effect(kast['att']))\n elif isKSyntaxLexical(kast):\n return KSyntaxLexical(kast['name'], kast['regex'], att = effect(kast['att']))\n elif isKFlatModule(kast):\n localSentences = [ onAttributes(sent, effect) for sent in kast['localSentences'] ]\n return KFlatModule(kast['name'], kast['imports'], localSentences, att = effect(kast['att']))\n elif isKDefinition(kast):\n modules = [ onAttributes(mod, effect) for mod in kast['modules'] ]\n requires = None if 'requires' not in kast else kast['requires']\n return KDefinition(kast['mainModule'], modules, requires = requires, att = effect(kast['att']))\n _fatal('No attributes for: ' + kast['node'] + '.')\n\ndef dedupeClauses(terms):\n \"\"\"Return a list of terms in the same order with duplicates removed.\n\n - Input: a list.\n - Output: a list with duplicates removed.\n \"\"\"\n newTerms = []\n for t in terms:\n if t not in newTerms:\n newTerms.append(t)\n return newTerms\n\ndef minimizeTerm(term, keepVars = None, abstractLabels = []):\n \"\"\"Minimize a K term for pretty-printing.\n\n - Input: kast term, and optionally requires and ensures clauses with constraints.\n - Output: kast term minimized.\n - Variables only used once will be removed.\n - Unused cells will be abstracted.\n - Attempt to remove useless conditions.\n \"\"\"\n term = inlineCellMaps(term)\n term = removeSemanticCasts(term)\n term = uselessVarsToDots(term, keepVars = keepVars)\n term = labelsToDots(term, abstractLabels)\n term = collapseDots(term)\n return term\n\ndef minimizeRule(rule, keepVars = []):\n \"\"\"Minimize a K rule or claim for pretty-printing.\n\n - Input: kast representing a K rule or claim.\n - Output: kast with the rule or claim minimized:\n - Variables only used once will be removed.\n - Unused cells will be abstracted.\n - Attempt to remove useless side-conditions.\n \"\"\"\n if not (isKRule(rule) or isKClaim(rule)):\n return rule\n\n ruleBody = rule['body']\n ruleRequires = rule['requires']\n ruleEnsures = rule['ensures']\n ruleAtts = rule['att']\n\n if ruleRequires is not None:\n ruleRequires = buildAssoc(KToken('true', 'Bool'), '_andBool_', dedupeClauses(flattenLabel('_andBool_', ruleRequires)))\n ruleRequires = simplifyBool(ruleRequires)\n\n if ruleEnsures is not None:\n ruleEnsures = buildAssoc(KToken('true', 'Bool'), '_andBool_', dedupeClauses(flattenLabel('_andBool_', ruleEnsures)))\n ruleEnsures = simplifyBool(ruleEnsures)\n\n ruleRequires = None if ruleRequires == KToken('true', 'Bool') else ruleRequires\n ruleEnsures = None if ruleEnsures == KToken('true', 'Bool') else ruleEnsures\n\n constrainedVars = [] if keepVars is None else keepVars\n if ruleRequires is not None:\n constrainedVars = constrainedVars + collectFreeVars(ruleRequires)\n if ruleEnsures is not None:\n constrainedVars = constrainedVars + collectFreeVars(ruleEnsures)\n ruleBody = minimizeTerm(ruleBody, keepVars = constrainedVars)\n\n if ruleRequires == KToken('true', 'Bool'):\n ruleRequires = None\n if isKRule(rule):\n return KRule(ruleBody, requires = ruleRequires, ensures = ruleEnsures, att = ruleAtts)\n else:\n return KClaim(ruleBody, requires = ruleRequires, ensures = ruleEnsures, att = ruleAtts)\n\ndef removeSourceMap(k):\n \"\"\"Remove source map information from a given definition.\n\n Input: A JSON encoded K object.\n Output: The same JSON encoded object, with all source information stripped.\n \"\"\"\n def _removeSourceMap(att):\n if isKAtt(att):\n atts = att['att']\n newAtts = { }\n for attKey 
in atts.keys():\n if attKey != 'org.kframework.attributes.Source' and attKey != 'org.kframework.attributes.Location':\n newAtts[attKey] = atts[attKey]\n return KAtt(atts = newAtts)\n return onAttributes(k, _removeSourceMap)\n\ndef readKastTerm(termPath):\n with open(termPath, \"r\") as termFile:\n return json.loads(termFile.read())['term']\n\ndef writeKDefinition(fileName, kDef, symbolTable):\n if not isKDefinition(kDef):\n _notif(\"Not a K Definition!\")\n print(kDef)\n sys.exit(1)\n specText = prettyPrintKast(kDef, symbolTable)\n with open(fileName, \"w\") as sfile:\n sfile.write(specText)\n _notif(\"Wrote spec file: \" + fileName)\n print(specText)\n sys.stdout.flush()\n return\n _fatal(\"Could not write spec file: \" + fileName)\n","repo_name":"c-vision/k","sub_path":"k-distribution/src/main/scripts/lib/pyk/kastManip.py","file_name":"kastManip.py","file_ext":"py","file_size_in_byte":24477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"10129500249","text":"import spike2_data_puller as dp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom quantities import s, dimensionless\n#Written by Kyrus Mama\n\n\ndef spikes_events_histogram(file_name, index=-1, baseline=[b'BLANK', b'mineral oil'], bins=-1, bins_btw=5):\n \"\"\"\n builds a histogram which displays spike frequency in a time window file the spikes in file_name.\n Draws vertical lines at times where events occur.\n\n\n Parameters\n ----------\n file_name: The name of the file from which you wish to retrieve the data. Must include the path.\n eg: r'E:\\sniffer_data_spring_2019\\ParameterTest_OE2_022219_odors.smr'\n index: The index of the set of spikes that you want to plot in the histogram.\n if index is negative, it combines all the spikes (except the 0 case) and plots them.\n baseline: a list of all the baseline event labels. This is used to draw a black line for baseline events,\n and a red line for the rest.\n bins: The number of bins the user wants in the histogram.\n if negative, calculates number of bins using bins_btw.\n bins_btw: The number of bins that the user wants per event.\n If the events are evenly spaced, this becomes the number of bins between two events.\n If bins is non-negative, this value is ignored.\n\n Returns\n -------\n n: A list of the number of elements in each bin.\n\n Throws\n -------\n FileNotFoundError: No such file or directory file_name if file_name does not exit.\n \"\"\"\n trains = dp.get_spike_train(file_name)\n event_times, event_labels= dp.get_events(file_name)\n\n if index < 0:\n displayed_times = trains[1]\n for i in range(2, len(trains)):\n displayed_times = np.concatenate([displayed_times, trains[i]])\n else:\n displayed_times = trains[index]\n\n if bins < 1:\n bins = math.ceil(bins_btw * len(event_times))\n\n (n, q, w) = plt.hist(displayed_times, bins)\n for j in range(len(event_times)):\n if event_labels[j] in baseline:\n col = 'k'\n else:\n col = 'r'\n plt.axvline(x=event_times[j], color=col)\n print(event_labels)\n plt.show()\n return n\n\n\ndef averaged_spike_events_histogram(name_list, index=-1, baseline=[b'BLANK', b'mineral oil'], bins= -1, bins_btw=5, subset = False):\n \"\"\"\n builds a histogram which displays spike frequency of spikes in a time window averaged across spikes from all files in name_list\n Draws vertical lines at times where events occur.\n\n\n Parameters\n ----------\n name_list: The list of file names from which you wish to retrieve the data. Must include the path.\n eg: [r'E:\\sniffer_data_spring_2019\\ParameterTest_OE2_022219_odors.smr']\n index: The index of the set of spikes that you want to plot in the histogram.\n if index is negative, it combines all the spikes (except the 0 case) and plots them.\n baseline: a list of all the baseline event labels. 
This is used to draw a black line for baseline events,\n and a red line for the rest.\n bins: The number of bins the user wants in the histogram.\n if negative, calculates number of bins using bins_btw.\n bins_btw: The number of bins that the user wants per event.\n If the events are evenly spaced, this becomes the number of bins between two events.\n If bins is non-negative, this value is ignored.\n\n Returns\n -------\n n: A list of the number of elements in each bin.\n\n Throws\n -------\n FileNotFoundError: No such file or directory file_name if file_name does not exit.\n \"\"\"\n all_display_times = np.zeros(0)\n for file_name in name_list:\n trains = dp.get_spike_train(file_name)\n event_times, event_labels = dp.get_events(file_name)\n\n if index < 0:\n displayed_times = trains[1]\n for i in range(2, len(trains)):\n displayed_times = np.concatenate([displayed_times, trains[i]])\n else:\n displayed_times = trains[index]\n\n all_display_times = np.concatenate([all_display_times, displayed_times])\n\n if bins < 1:\n bins = math.ceil(bins_btw * len(event_times))\n\n if subset:\n # subset_display_times = np.random.choice(all_display_times, int(len(all_display_times) / len(name_list)),\n # replace=False)\n subset_display_times = all_display_times[::len(name_list)]\n else:\n subset_display_times = all_display_times\n\n (n, q, w) = plt.hist(subset_display_times, bins)\n for j in range(len(event_times)):\n if event_labels[j] in baseline:\n col = 'k'\n else:\n col = 'r'\n plt.axvline(x=event_times[j], color=col)\n print(event_labels)\n plt.show()\n return n\n\n\ndef break_by_event(spike_times, event_times, event_names, before_event, after_event):\n \"\"\"\n helper function\n groups spikes up based on which event they are near\n\n\n Parameters\n ----------\n spike_times: A list of floats each of which represents the timing of a spike\n event_times: A list of float lists each of which represents the timing of a particular type of event\n event_names: A list of event names, which correspond to the elements of event times. Must have the same length as\n event_times. event_times[i] is a list of times that the event event_names[i] occurred.\n before_event: A float. A spike is considered to be near and before an event if its timing is less than the\n events timing and if (the events timing) - (its timing) is less than before_event\n after_event: A float. A spike is considered to be near and after an event if its timing is greater than the\n events timing and if (its timing) - (the events timing) is less than after_event\n\n Returns\n -------\n lst: A list of numpy arrays, with the same length as event_names . Each numpy array contains\n a list of spike timings that are near the events of the corresponding event_names\n \"\"\"\n lst = []\n for e in range(len(event_names)):\n sie = np.zeros(0)\n for ei in event_times[e]:\n relative_times = spike_times*s - ei\n t_spikes = relative_times[np.where(np.logical_and(- before_event < relative_times, relative_times < after_event))]\n sie = np.concatenate([sie, t_spikes])\n # sie = np.random.choice(sie, int(len(sie)/len(event_times[e])))\n # print(len(event_times[e]))\n sie = sie[::len(event_times[e])]\n lst.append(sie)\n return lst\n\n\ndef group_events(event_times, event_names, grouped_event_times=[], grouped_event_names=[], skip_last_event=False):\n \"\"\"\n helper function\n groups the timing of events based on the event names.\n Can be used to prepare for break_by_event. 
The returned lists satisfy both event_times and event_names conditions.\n\n Parameters\n ----------\n event_times: The list of times that different events took place\n event_names: The list of names of events, corresponding to event_times. Must have the same length as event_times.\n event_names[i] happened at event_times[i]\n grouped_event_times: A list of float lists. The outer list contains a list of times for each event.\n grouped_event_names: A list of event names that corresponds to grouped_event_times. Must have the same length as\n grouped_event_times. Must not contain duplicates.\n grouped_event_names[i] happened at grouped_event_times[i][0], ... grouped_event_times[i][n],\n where n=len(grouped_event_times[i])\n skip_last_event: if True, do not include the last event in event_names in the returned lists.\n\n Returns\n -------\n grouped_event_times, grouped_event_names\n after adding the groupings from event_times and event_names\n\n Throws\n -------\n ValueError grouped_event_names must not contain duplicates if grouped_event_names contains duplicates.\n \"\"\"\n assert len(event_times) == len(event_names)\n assert len(grouped_event_times) == len(grouped_event_names)\n\n for i in range(len(event_names) - (1 if skip_last_event else 0)):\n if event_names[i] in grouped_event_names:\n loc = grouped_event_names.index(event_names[i])\n grouped_event_times[loc].append(event_times[i])\n else:\n grouped_event_names.append(event_names[i])\n grouped_event_times.append([event_times[i]])\n\n return grouped_event_times, grouped_event_names\n\n\ndef grouped_spikes_events_histogram(file_name, *args, index=-1, baseline=[b'BLANK', b'mineral oil'],\n bins_btw=5, before_event=30.*s, after_event=40.*s, skip_last_event=False):\n \"\"\"\n for each unique event denoted by event label, display a histogram which displays spike frequency in a time window\n around the occurrence of that event.\n Draws vertical lines at times where events occur.\n If multiple events of the same type occur, average them together\n\n Parameters\n ----------\n file_name: The name of the file from which you wish to retrieve the data. Must include the path.\n eg: r'E:\\sniffer_data_spring_2019\\ParameterTest_OE2_022219_odors.smr'\n index: The index of the set of spikes that you want to plot in the histogram.\n if index is negative, it combines all the spikes (except the 0 case) and plots them.\n baseline: a list of all the baseline event labels. This is used to draw a black line for baseline events,\n and a red line for the rest.\n bins_btw: The number of bins that the user wants per event.\n before_event: A float. A spike is considered to be near and before an event if its timing is less than the\n events timing and if (the events timing) - (its timing) is less than before_event\n after_event: A float. 
A spike is considered to be near and after an event if its timing is greater than the\n events timing and if (its timing) - (the events timing) is less than after_event\n skip_last_event: if True, do not include the last event in event_names in the returned lists.\n\n Returns nothing\n\n Throws FileNotFoundError: No such file or directory file_name if file_name does not exit.\n \"\"\"\n trains = dp.get_spike_train(file_name)\n event_times, event_labels = dp.get_events(file_name)\n print(event_times)\n grouped_times, grouped_labels = group_events(event_times, event_labels, skip_last_event=skip_last_event)\n\n ttal = [0]\n for arg in args:\n tta = event_times[len(event_times)-1] + before_event + after_event\n ttal.append(tta)\n event_times, event_labels = dp.get_events(arg)\n event_times = event_times + tta\n grouped_times, grouped_labels = group_events(event_times, event_labels, grouped_times, grouped_labels,\n skip_last_event=skip_last_event)\n\n if index < 0:\n displayed_times = trains[1]\n for i in range(2, len(trains)):\n displayed_times = np.concatenate([displayed_times, trains[i]])\n else:\n displayed_times = trains[index]\n\n c = 1\n for arg in args:\n tta2 = ttal[c]\n c = c+1\n trains = dp.get_spike_train(arg)\n trains = trains*s + tta2\n if index < 0:\n for i in range(1, len(trains)):\n displayed_times = np.concatenate([displayed_times, trains[i]])\n else:\n displayed_times = np.concatenate([displayed_times, trains[index]])\n\n lst = break_by_event(displayed_times, grouped_times, grouped_labels, before_event, after_event)\n\n bins = bins_btw\n cols = len(lst)\n\n subplots = []\n plt.figure(1)\n yu_lim = 0\n yd_lim = len(max(lst, key=len))\n for i in range(cols):\n subplots.append(plt.subplot(1, cols, i+1))\n n, bbb, ppp = plt.hist(lst[i], bins)\n yu_lim = max(max(n), yu_lim)\n yd_lim = min(min(n), yd_lim)\n tr = \"\"+str(grouped_labels[i])+\"\\n\"+str(len(grouped_times[i]))+\" time\" + \\\n (\"\" if len(grouped_times[i]) == 1 else \"s\")\n plt.title(tr)\n if grouped_labels[i] in baseline:\n col = 'k'\n else:\n col = 'r'\n plt.axvline(x=0, color=col)\n\n for subs in subplots:\n subs.set_ylim([max(yd_lim - 100, 0), yu_lim+100])\n if subs!=subplots[0]:\n subs.set_yticklabels([])\n\n plt.show()\n\n\nfname = r'E:\\sniffer_data_spring_2019\\ParameterTest_OE2_022219_odors.smr'\nname_list = [r'E:\\sniffer_data_spring_2019\\ParameterTest_OE2_022119_odors.smr',\n r'E:\\sniffer_data_spring_2019\\ParameterTest_OE1_022119_odors.smr']\n# (spikes_events_histogram(fname, bins_btw=30))\n# averaged_spike_events_histogram(name_list, bins_btw=40, subset=True)\n# print(dp.get_spike_train(fname))\ngrouped_spikes_events_histogram(fname, bins_btw=30, skip_last_event=True)\n# grouped_spikes_events_histogram(name_list[0], name_list[1], bins_btw=30, skip_last_event=True)\n","repo_name":"KyrusMama/CPLab-Data-Analysis","sub_path":"spike_histogram.py","file_name":"spike_histogram.py","file_ext":"py","file_size_in_byte":12872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7843896017","text":"from splashpy import Framework\n\nclass OrderPickingHelper:\n \"\"\"\n Collection of Static Functions to manage Order Stock Picking\n \"\"\"\n\n @staticmethod\n def is_enabled():\n \"\"\"\n Check if Auto-Picking Features is Active\n\n :return: bool\n \"\"\"\n return True\n\n @staticmethod\n def confirm(picking):\n \"\"\"\n Confirm All Stock Picking Moves\n\n :return: None\n \"\"\"\n if not OrderPickingHelper.is_enabled() or picking.state == \"cancel\":\n return\n\n picking.action_confirm()\n\n @staticmethod\n def done(picking):\n \"\"\"\n Validate All Stock Picking Moves\n\n :param picking: stock.picking\n :return: None\n \"\"\"\n # ====================================================================#\n # Safety Checks\n if not OrderPickingHelper.is_enabled() or picking.state == \"cancel\":\n return\n # ====================================================================#\n # Load Odoo System Manager\n from odoo.addons.splashsync.helpers import SystemManager\n # ====================================================================#\n # Odoo 15 & 16\n if SystemManager.compare_version(15) >= 0:\n Framework.log().warn(\"Picking Done: Odoo 15+\")\n picking.action_set_quantities_to_reservation()\n picking._action_done()\n # ====================================================================#\n # Odoo 14\n elif SystemManager.compare_version(14) >= 0:\n Framework.log().warn(\"Picking Done: Odoo 14\")\n from odoo.tools.float_utils import float_is_zero\n for move_line in picking.move_line_ids.filtered(lambda m: float_is_zero(m.qty_done, precision_rounding=m.product_uom_id.rounding)):\n move_line.qty_done = move_line.product_qty\n picking._action_done()\n # ====================================================================#\n # Odoo 12 & 13\n if SystemManager.compare_version(12) >= 0:\n Framework.log().warn(\"Picking Done: Odoo 12\")\n from odoo.tools.float_utils import float_is_zero\n for move_line in picking.move_ids_without_package.filtered(lambda m: float_is_zero(m.quantity_done, precision_rounding=m.product_uom.rounding)):\n move_line.quantity_done = move_line.product_qty\n picking.action_done()\n\n\n @staticmethod\n def get_reserved_qty(order_line):\n \"\"\"\n Get Reserved Qty for a Product\n\n :return: int\n \"\"\"\n reserved_qty = 0\n # ====================================================================#\n # Walk on Order Pickings\n for move in order_line.move_ids:\n # ====================================================================#\n # Pickings Canceled\n if move.state == \"cancel\":\n continue\n reserved_qty += move.product_uom_qty\n\n # Framework.log().dump({\"move\": move.id })\n\n return int(reserved_qty)\n","repo_name":"SplashSync/Odoo","sub_path":"odoo/addons/splashsync/helpers/objects/orders/picking.py","file_name":"picking.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34751005794","text":"import cv2\nimport imutils\nimport socketio\nimport time\nfrom pyzbar import pyzbar\nfrom gpio_control import *\n\nWS_URL = \"https://25d5-200-131-11-6.ngrok.io/\"\nSEND_TOPIC = \"get-in\"\nRECEIVE_TOPIC = \"return-in\"\n\ncap = cv2.VideoCapture(0)\n\nsio = socketio.Client()\n\ndef release_ratchet():\n print(\"Acesso permitido.\")\n \n open()\n\n\ndef access_denied():\n print(\"Acesso negado.\")\n \n close()\n\n\n@sio.event\ndef connect():\n print(\"I'm connected!\")\n\n\n@sio.event\ndef connect_error(data):\n print(\"The connection failed!\")\n\n\n@sio.event\ndef disconnect():\n print(\"I'm disconnected!\")\n\n\n@sio.on(RECEIVE_TOPIC)\ndef on_message(data):\n print(data)\n \n if \"error\" in data:\n \tif not data[\"error\"]:\n \t\trelease_ratchet()\n \telse:\n \t\taccess_denied()\n\n\ndef make_request(code):\n sio.emit(SEND_TOPIC, {\"ra\": code})\n\n\ndef main():\n\tlast_code = \"\"\n\t\n\twhile True:\n\t\tis_ok, img = cap.read()\n\t\t\n\t\tif is_ok:\n\t\t\n\t\t\timg = imutils.resize(img, 400)\n\n\t\t\tqrcodes = pyzbar.decode(img)\n\n\t\t\tfor qrcode in qrcodes:\n\t\t\t\t(x, y, w, h) = qrcode.rect\n\t\t\t\tcv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n \n\t\t\t\tqrcodedata = qrcode.data.decode(\"utf-8\")\n\t\t\t\tqrcodetype = qrcode.type\n\n\t\t\t\ttext = f\"{qrcodedata}\"\n\n\t\t\t\tif text != last_code:\n\t\t\t\t\tlast_code = qrcodedata\n\t\t\t\t\tprint(text)\n\t\t\t\t\tmake_request(qrcodedata)\n\t\t\t\telse:\n\t\t\t\t\ttime.sleep(1.0)\n\t\t\t\t\t\n\t\t\t\tcv2.putText(\n img,\n text,\n (x, y - 10),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n (0, 0, 255),\n 2,\n )\n\n\t\t\tcv2.imshow(\"qrcode detector\", img)\n\t\t\tif cv2.waitKey(1) == ord(\"q\"):\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Erro ao iniciar captura.\")\n\t\t\tbreak\n\t\t\t\nif __name__ == \"__main__\":\n config_gpio()\n\n sio.connect(WS_URL)\n\n main()\n\ncap.release()\ncv2.destroyAllWindows()\nsio.disconnect()\n","repo_name":"IFMuz-Experience/catraca_digital","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"42134246002","text":"from abc import ABC\nfrom homework_02.exceptions import LowFuelError, NotEnoughFuel\n\nclass Vehicle(ABC):\n started = False\n weight = 0\n fuel = 0\n fuel_consumption = 0\n\n def __init__(self, weight, fuel, fuel_con):\n self.weight = weight\n self.fuel = fuel\n self.fuel_consumption = fuel_con\n\n def start(self):\n if self.started is False:\n if self.fuel > 0:\n self.started = True\n else:\n raise LowFuelError(\"No fuel to use vehicle!\")\n\n def move(self, distance):\n if self.fuel // self.fuel_consumption < distance:\n raise NotEnoughFuel(\"Not enough fuel to go this far\")\n else:\n self.fuel -= distance * self.fuel_consumption\n","repo_name":"Bratislava2003/Homeworks","sub_path":"homework_02/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27986922724","text":"''' Rev.ai API-based Converter classes '''\n\nimport logging\nimport os\nimport time\n\nfrom pliers.stimuli.text import TextStim, ComplexTextStim\nfrom pliers.utils import attempt_to_import, verify_dependencies\nfrom pliers.converters.audio import AudioToTextConverter\nfrom pliers.transformers.api import APITransformer\n\nrev_ai = attempt_to_import('rev_ai')\nrev_ai_client = attempt_to_import('rev_ai.apiclient',\n 'rev_ai_client',\n ['RevAiAPIClient'])\n\n\nclass RevAISpeechAPIConverter(APITransformer, AudioToTextConverter):\n\n ''' Uses the Rev AI speech-to-text API to transcribe an audio file.\n\n Args:\n access_token (str): API credential access token. Must be passed\n explicitly or stored in the environment variable specified\n in the _env_keys field.\n timeout (int): Number of seconds to wait for audio transcription\n to finish. Defaults to 90 seconds.\n request_rate (int): Number of seconds to wait between polling the\n API for completion.\n language (str): Language included in the provided audio file.\n Must be a language supported by RevAI; for the full list,\n see their docs: https://docs.rev.ai/api/asynchronous/reference/#operation/SubmitTranscriptionJob!ct=application/json&path=language&t=request #:PEP8 -E501\n '''\n\n _env_keys = ('REVAI_ACCESS_TOKEN',)\n _log_attributes = ('access_token', 'timeout', 'request_rate')\n VERSION = '1.0'\n\n def __init__(self, access_token=None, timeout=1000, request_rate=5, language=\"en\"):\n verify_dependencies(['rev_ai_client'])\n if access_token is None:\n try:\n access_token = os.environ['REVAI_ACCESS_TOKEN']\n except KeyError:\n raise ValueError(\"A valid API key must be passed when a \"\n \"RevAISpeechAPIConverter is initialized.\")\n self.access_token = access_token\n self.timeout = timeout\n self.request_rate = request_rate\n self.client = rev_ai_client.RevAiAPIClient(access_token)\n self.language = language\n super().__init__()\n\n @property\n def api_keys(self):\n return [self.access_token]\n\n def check_valid_keys(self):\n try:\n account = self.client.get_account()\n if account.balance_seconds > 0:\n return True\n else:\n logging.warning(\"Insufficient balance for Rev.ai speech \"\n \"converter: {}\".format(account.balance_seconds))\n return False\n except Exception as e:\n logging.warning(str(e))\n return False\n\n def _convert(self, audio):\n verify_dependencies(['rev_ai'])\n msg = \"Beginning audio transcription with a timeout of %fs. 
Even for \"\\\n \"small audios, full transcription may take awhile.\" % self.timeout\n logging.warning(msg)\n\n if audio.url:\n job = self.client.submit_job_url(\n audio.url, language=self.language)\n else:\n with audio.get_filename() as filename:\n job = self.client.submit_job_local_file(\n filename, language=self.language)\n\n operation_start = time.time()\n response = self.client.get_job_details(job.id)\n while (response.status == rev_ai.JobStatus.IN_PROGRESS) and \\\n (time.time() - operation_start) < self.timeout:\n response = self.client.get_job_details(job.id)\n time.sleep(self.request_rate)\n\n if (time.time() - operation_start) >= self.timeout:\n msg = \"Conversion reached the timeout limit of %fs.\" % self.timeout\n logging.warning(msg)\n\n if response.status == rev_ai.JobStatus.FAILED:\n raise Exception('API failed: %s' % response.failure_detail)\n\n result = self.client.get_transcript_object(job.id)\n\n elements = []\n order = 0\n for m in result.monologues:\n for e in m.elements:\n if e.type_ == 'text':\n start = e.timestamp\n end = e.end_timestamp\n elements.append(TextStim(text=e.value,\n onset=start,\n duration=end-start,\n order=order))\n order += 1\n\n return ComplexTextStim(elements=elements, onset=audio.onset)\n","repo_name":"PsychoinformaticsLab/pliers","sub_path":"pliers/converters/api/revai.py","file_name":"revai.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"53"}
+{"seq_id":"25526799323","text":"#Student类为例,在Python中,定义类是通过class关键字:\r\nclass Student(object):\r\n pass\r\nbart = Student()\r\n\r\nprint(bart)\r\nprint(Student)\r\nbart.name = 'Bart Simpson'\r\nprint(bart.name)\r\n\r\n#__init__方法的第一个参数永远是self,表示创建的实例本身,因此,在__init__方法内部,就可以把各种属性绑定到self,因为self就指向创建的实例本身\r\nclass Student1(object):\r\n\r\n def __init__(self,name,score):\r\n self.name = name\r\n self.score = score\r\n\r\n#在创建实例的时候,就不能传入空的参数了,必须传入与__init__方法匹配的参数,但self不需要传,Python解释器自己会把实例变量传进去\r\nbart1 = Student1(\"hukaijia\",100)\r\nprint(bart1.name)\r\nprint(bart1.score)\r\n\r\ndef print_score(std):\r\n print('%s: %s' % (std.name, std.score))\r\n\r\nprint_score(bart1)\r\n\r\n#Student实例本身就拥有这些数据,要访问这些数据,就没有必要从外面的函数去访问,可以直接在Student类的内部定义访问数据的函数,这样,就把“数据”给封装起来了。这些封装数据的函数是和Student类本身是关联起来的,我们称之为类的方法\r\n\r\nclass Student2(object):\r\n\r\n def __init__(self, name, score):\r\n self.name = name\r\n self.score = score\r\n\r\n def print_score(self):\r\n print('%s: %s' % (self.name, self.score))\r\n\r\n def get_grade(self):\r\n if self.score >= 90:\r\n return 'A'\r\n elif self.score >= 60:\r\n return 'B'\r\n else:\r\n return 'C'\r\n\r\nbart2 = Student2(\"hukaijia3\",1003)\r\nbart2.print_score()\r\nprint(bart2.get_grade())\r\n\r\n#如果要让内部属性不被外部访问,可以把属性的名称前加上两个下划线__,在Python中,实例的变量名如果以__开头,就变成了一个私有变量(private),只有内部可以访问,外部不能访问,所以,我们把Student类改一改:\r\nclass Student3(object):\r\n\r\n def __init__(self, name, score):\r\n self.name = name\r\n self.score = score\r\n\r\n # def get_name(self):\r\n # return self.__name\r\n #\r\n # def get_score(self):\r\n # return self.__score\r\n #\r\n # def print_score(self):\r\n # print('%s: %s' % (self.__name, self.__score))\r\n\r\nbart3 = Student3(\"hukaijia4\",1004)\r\n# print(bart3.get_name())\r\n# # bart3.print_score()\r\n\r\nclass Animal(object):\r\n def run(self):\r\n print(\"Animal is running...\")\r\n\r\n#当我们需要编写Dog和Cat类时,就可以直接从Animal类继承\r\n\r\nclass Dog(Animal):\r\n pass\r\n\r\nclass Cat(Animal):\r\n pass\r\n\r\ndog = Dog()\r\ndog.run()\r\n\r\nclass Dog1(Animal):\r\n\r\n def run(self):\r\n print('Dog is running...')\r\n\r\n def eat(self):\r\n print('Eating meat...')\r\n\r\ndog1 = Dog1()\r\ndog1.run()\r\n\r\na = list() # a是list类型\r\nb = Animal() # b是Animal类型\r\nc = Dog() # c是Dog类型\r\n\r\nprint(isinstance(a, list))\r\nprint(isinstance(b, Animal))\r\nprint(isinstance(c, Dog))\r\n\r\ndef run_twice(animal):\r\n animal.run()\r\n animal.run()\r\n\r\nrun_twice(Dog1())\r\n\r\n#获取对象信息\r\n#使用type() 判断对象类型,使用type()函数\r\nprint(\"type()\",type(123))\r\n\r\n#isinstance() class的继承关系来说,使用type()就很不方便。我们要判断class的类型,可以使用isinstance()函数\r\nprint(\"isinstance()\",isinstance(b, Animal))\r\n\r\n#dir()函数,它返回一个包含字符串的list,比如,获得一个str对象的所有属性和方法\r\nprint(dir('ABC'))\r\n","repo_name":"hkj123/python","sub_path":"oop.py","file_name":"oop.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"43012641334","text":"import nltk\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport functools\nimport spacy\n\n\nclass TextPreprocessing():\n \"\"\" This class the preprocessing of a documents (text field) that are contained into a pandas DataFrame.\n It's possible to perform the following step:\n 1. Text standardization with data cleaning (removal of special characters, numbers, link etc.).\n 2. Removal of stopwords.\n 3. Lemmatization.\n\n Args:\n -----\n lemmatization (boolean) >>> Set it to True if you want to perform the lemmatization step.\n Defalt is equal to False.\n standardize (boolean) >>> Set it to True if you want to perform the standardization step.\n Defalt is equal to True.\n chr_to_remove (list) >>> List of strings (regex) that represent the vector of special char or string that\n you want to remove.\n chr_to_keep (regex) >>> regex that represent the char that you want to keep. By default this method return\n only the letters of english alphabet. If you want to keep another special character\n you can specify this by setting this field.\n language (string) >>> Optional. Language of the text that you want to analyze.\n Default is 'en' (English).\n \"\"\"\n\n def __init__(self, lemmatization=False,\n standardize=True,\n remove_stopwords=True,\n chr_to_remove=[r\"http\\S+\", r\"http\", r\"@\\S+\", r\"@\", r\"\"],\n chr_to_keep=r\"[^A-Za-z]\",\n language='en'):\n\n self.lemmatization = lemmatization\n self.standardize = standardize\n self.chr_to_remove = chr_to_remove\n self.chr_to_keep = chr_to_keep\n self.language = language\n\n # Stopwords\n self.remove_stopwords = remove_stopwords\n if self.remove_stopwords:\n if self.language == 'en':\n self.stoplist = stopwords.words('english')\n elif self.language == 'it':\n self.stoplist = stopwords.words('italian')\n else:\n raise Exception(\"Invalid language\")\n else:\n self.stoplist = []\n\n def is_null(self, text):\n return text.isspace()\n\n def standardize_text(self, df, text_field):\n\n for regexp in self.chr_to_remove:\n df[text_field] = df[text_field].str.replace(regexp, \"\")\n\n df[text_field] = df[text_field].str.replace(self.chr_to_keep, \" \") # we not consider numbers\n df[text_field] = df[text_field].str.lower()\n return df\n\n def rm_stopwords(self, text):\n clearlist = [word for word in text if word not in self.stoplist]\n return clearlist\n\n def lemmatizer(self, text):\n if self.language == 'it':\n nlp = spacy.load('it_core_news_sm')\n else:\n nlp = spacy.load(self.language)\n sent = []\n doc = nlp(\" \".join(text))\n for word in doc:\n sent.append(word.lemma_)\n return sent\n\n def tokenization(self, text, min_len):\n tokenizer = RegexpTokenizer(r'\\w+')\n token_list = tokenizer.tokenize(text)\n token_list = [token for token in token_list if len(token) > min_len]\n token_list = [token for token in token_list if token not in self.stoplist]\n return token_list\n\n def fit(self, data_df, field, min_len=3):\n \"\"\"\n Args:\n -----\n data_df (pandas.DataFrame) >>> dataframe that contains the documents and the text field to process.\n field (string) >>> name of the field (column) that contain the text to process.\n min_len (int) >>> minimum length of the word\n\n Returns:\n --------\n pandas.DataFrame that are the copy of the original dataframe plus a column that contain the clean tokens\n (\"tokens\") and (if computed) another field with the lemma of these tokens (\"lemma\").\n \"\"\"\n\n # Drop 
fields which contain only space char\n print(\"Data cleaning...\")\n df = data_df[~data_df[field].apply(self.is_null)]\n\n # Standardization\n if self.standardize:\n print(\"Standardization...\")\n df = self.standardize_text(df, field)\n\n # Token extraction\n print(\"Tokenization...\")\n df[\"tokens\"] = df[field].apply(self.tokenization, min_len=min_len)\n\n # Stopwords\n if self.remove_stopwords:\n print(\"Removing stopwords...\")\n df[\"tokens\"] = df[\"tokens\"].apply(self.rm_stopwords)\n\n # Lemmatization\n if self.lemmatization:\n print(\"Lemmatization...\")\n df[\"lemma\"] = df[\"tokens\"].apply(self.lemmatizer)\n print(\"Finish\")\n\n return df\n\n\nclass VectorizeData():\n\n \"\"\" This class compute the vectorization of a list of text tokens.\n Args:\n -----\n method (string) >>> The metric used to transform the feature data. The choices are \"binary\", \"count\", \"tf\" or \"tf-idf\".\n Default is \"tf-idf\".\n\n \"\"\"\n\n def __init__(self, method='tf-idf'):\n self.method = method\n\n def fit(self, train_data, test_data = None, skipper = 'word', max_features = 60000):\n \"\"\"\n Args:\n -----\n train_data (pandas.Series) >>> column of the training set dataframe that contains the tokens to process.\n test_data (pandas.Series) >>> column of the test set dataframe that contains the tokens to process.\n\n Returns:\n --------\n a tuple contains the two feature matrix (training data and test data).\n \"\"\"\n\n if self.method == 'binary':\n count_vectorizer = TfidfVectorizer(analyzer = skipper, binary=True, use_idf=False, norm=None, max_features=max_features)\n elif self.method == 'count':\n count_vectorizer = CountVectorizer(analyzer = skipper)\n elif self.method == 'tf':\n count_vectorizer = TfidfVectorizer(analyzer = skipper, use_idf=False, max_features=max_features)\n elif self.method == 'tf-idf':\n count_vectorizer = TfidfVectorizer(analyzer = skipper, max_features=max_features)\n else:\n raise Exception(\"Invalid method. Use: binary, tf or tf-idf\")\n \n if skipper == 'word':\n x_train = train_data.apply(str)\n else:\n x_train = train_data\n\n transformed_train_data = count_vectorizer.fit_transform(x_train)\n if test_data is not None:\n if skipper == 'word':\n x_test = test_data.apply(str)\n else:\n x_test = test_data\n transformed_test_data = count_vectorizer.transform(x_test)\n return transformed_train_data, transformed_test_data, count_vectorizer\n else:\n return transformed_train_data, count_vectorizer\n\n\ndef get_n_skipgrams(tokens, n, k):\n \"\"\"\n Fuction to extract the skip gram of a given list of tokens\n Args:\n -----\n tokens (list of strings) >>> list of tokens extracted by using the TextPreprocessing class.\n n (int) >>> number of gram.\n k (int) >>> skip param for the skip gram.\n \n Returns:\n --------\n a list of tuple that contains the skip gram.\n \"\"\"\n \n sent = \" \".join(tokens).split()\n return list(nltk.skipgrams(sent, n, k))\n","repo_name":"Tostox/mltools","sub_path":"mltools/textMining/text_preprocessing.py","file_name":"text_preprocessing.py","file_ext":"py","file_size_in_byte":7610,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"36569596436","text":"import os\nimport csv\n\ndef percentage(part, whole):\n return 100 * float(part)/float(whole)\n\n\ncsvpath = os.path.join(\"..\", \"Resources\", \"election_data.csv\")\nouputfile = \"election.txt\"\n\n# declaring lists\nvoter_id = []\ncounty = []\ncandidate = []\n# declaring vote variables\nvotes = 0\nvotes_for_li = 0\nvotes_for_correy = 0\nvotes_for_khan = 0\nvotes_for_o_tooley = 0\n# declaring percent variables\npercent_for_li = 0\npercent_for_correy = 0\npercent_for_khan = 0\npercent_for_o_tooley = 0\n# declaring winner\nwinner = \"\"\n\n# extracting data from csv file\nwith open(csvpath, \"r\") as infile:\n csv_reader = csv.reader(infile, delimiter=\",\")\n csv_header = next(csv_reader)\n for Voter_ID, County, Candidate in csv_reader:\n voter_id.append(Voter_ID)\n county.append(County)\n candidate.append(Candidate)\n votes += 1\ninfile.close()\n\n# counting votes\ncounter = 0\nfor vote in candidate:\n if vote == \"Khan\":\n votes_for_khan += 1\n elif vote == \"Correy\":\n votes_for_correy += 1\n elif vote == \"Li\":\n votes_for_li += 1\n else :\n votes_for_o_tooley += 1\n\n#finding popular vote\ncandidate_votes = [votes_for_li, votes_for_correy, votes_for_khan, votes_for_o_tooley]\npopular_candidate = max(candidate_votes)\n\n#finding winner\nif popular_candidate == votes_for_li:\n winner = \"Li\"\nelif popular_candidate == votes_for_correy:\n winner = \"Correy\"\nelif popular_candidate == votes_for_khan:\n winner = \"Khan\"\nelse:\n winner = \"O'Toonley\"\n\n# calculating percentages\npercent_for_li = percentage(votes_for_li, votes)\npercent_for_correy = percentage(votes_for_correy, votes)\npercent_for_khan = percentage(votes_for_khan, votes)\npercent_for_o_tooley = percentage(votes_for_o_tooley, votes)\n\n# generating output for file\ntext_output = (f\"\"\"\nElection Results\n----------------------\nTotal Votes: {votes}\n----------------------\nKhan: {percent_for_khan} ({votes_for_khan})\nCorrey: {percent_for_correy} ({votes_for_correy})\nLi: {percent_for_li} ({votes_for_li})\nO'Tooley: {percent_for_o_tooley} ({votes_for_o_tooley})\n----------------------\nWinner: {winner}\n----------------------\n\"\"\")\n\n# file output\nwith open(ouputfile, \"w\", newline=\"\") as datafile:\n datafile.write(text_output)\n\n\n\n\n","repo_name":"tmartinez52/python-challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9986841583","text":"#\n# File: ./src/vutils/cli/io.py\n# Author: Jiří Kučera \n# Date: 2021-07-11 16:17:41 +0200\n# Project: vutils-cli: Auxiliary library for writing CLI applications\n#\n# SPDX-License-Identifier: MIT\n#\n\"\"\"CLI input/output.\"\"\"\n\nimport sys\nfrom typing import TYPE_CHECKING, TextIO\n\nimport colorama\n\nif TYPE_CHECKING:\n from vutils.cli import StreamsProxyProtocolP\n\n\ndef nocolor(text: str) -> str:\n \"\"\"\n Make the text as it is.\n\n :param text: The text\n :return: the unchanged text\n \"\"\"\n return text\n\n\ndef red(text: str) -> str:\n \"\"\"\n Make the text red.\n\n :param text: The text\n :return: the text colored to red\n \"\"\"\n return (\n f\"{colorama.Style.BRIGHT}{colorama.Fore.RED}\"\n f\"{text}\"\n f\"{colorama.Style.RESET_ALL}\"\n )\n\n\ndef green(text: str) -> str:\n \"\"\"\n Make the text green.\n\n :param text: The text\n :return: the text colored to green\n \"\"\"\n return (\n f\"{colorama.Style.BRIGHT}{colorama.Fore.GREEN}\"\n f\"{text}\"\n f\"{colorama.Style.RESET_ALL}\"\n )\n\n\ndef brown(text: str) -> str:\n \"\"\"\n Make the text brown.\n\n :param text: The text\n :return: the text colored to brown\n \"\"\"\n return f\"{colorama.Fore.YELLOW}{text}{colorama.Fore.RESET}\"\n\n\ndef yellow(text: str) -> str:\n \"\"\"\n Make the text yellow.\n\n :param text: The text\n :return: the text colored to yellow\n \"\"\"\n return (\n f\"{colorama.Style.BRIGHT}{colorama.Fore.LIGHTYELLOW_EX}\"\n f\"{text}\"\n f\"{colorama.Style.RESET_ALL}\"\n )\n\n\ndef blue(text: str) -> str:\n \"\"\"\n Make the text blue.\n\n :param text: The text\n :return: the text colored to blue\n \"\"\"\n return (\n f\"{colorama.Style.BRIGHT}{colorama.Fore.BLUE}\"\n f\"{text}\"\n f\"{colorama.Style.RESET_ALL}\"\n )\n\n\nclass StreamsProxyMixin:\n \"\"\"\n I/O streams proxy mixin.\n\n Mixin that provides interface to manipulating streams. Should be used\n together with `ApplicationMixin` and `LoggerMixin`.\n \"\"\"\n\n def __init__(self: \"StreamsProxyProtocolP\") -> None:\n \"\"\"\n Initialize streams.\n\n Default streams are `sys.stdout` for the output stream and `sys.stderr`\n for the error output stream.\n \"\"\"\n self.__output: TextIO = sys.stdout\n self.__errout: TextIO = sys.stderr\n\n def set_streams(\n self: \"StreamsProxyProtocolP\",\n ostream: \"TextIO | None\" = None,\n estream: \"TextIO | None\" = None,\n ) -> None:\n \"\"\"\n Set output and error output streams.\n\n :param ostream: The output stream\n :param estream: The error output stream\n\n Output stream and error output stream is not set if *ostream* and\n *estream* is `None`, respectively.\n \"\"\"\n if ostream is not None:\n self.__output = ostream\n if estream is not None:\n self.__errout = estream\n\n def wout(self: \"StreamsProxyProtocolP\", text: str) -> None:\n \"\"\"\n Write *text* to the output stream.\n\n :param text: The text\n \"\"\"\n self.__output.write(text)\n\n def werr(self: \"StreamsProxyProtocolP\", text: str) -> None:\n \"\"\"\n Write *text* to the error output stream.\n\n :param text: The text\n \"\"\"\n self.__errout.write(text)\n","repo_name":"i386x/vutils-cli","sub_path":"src/vutils/cli/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4505979974","text":"#! /usr/bin/env python3.4\n\nimport sys\nimport argparse\nimport logging as log\nimport os\nimport hashlib\nimport fnmatch\nimport pickle\n\nIGNORE_HIDDEN=True\nIGNORE_SYMLINKS=True\nPERSIST_FILELIST=True\nPERSIST_FILENAME='dedup.filelist'\nIDENTICAL_FILESIZE_CHECK=True\n\ndef parseArgs():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--directory', '-d', dest='directoryList', nargs='*', required=True)\n\tparser.add_argument('--include', '-i', dest='includePatternList', nargs='*', default=['*'],\n\t\thelp='List of patterns which files need to match to be in dedup list, e.g. \"--include *.jpg *.png\"')\n\tparser.add_argument('--exclude', '-e', dest='excludePatternList', nargs='*', default=[],\n\t\thelp='List of patterns which files should not match to be in dedup list, e.g. \"--exclude *.c *.h\"')\n\tparser.add_argument('--verbose', '-v', action='count', default=0)\n\tparser.add_argument('--exact', action='store_true', default=False,\n\t\thelp='This option will enable exact matching by filename for a file to be considered a duplicate')\n\tparser.add_argument('--delete', action='store_true', default=False)\n\tparser.add_argument('--list', action='store_true', default=True)\n\treturn parser.parse_args()\n\nclass File():\n\tdef __init__(self, filename, dirpath):\n\t\t# Immutable (desired) instance variables\n\t\tself.__filename = filename\n\t\tself.__dirpath = dirpath\n\t\t# \"Hash\" variables\n\t\tself.__hash = None\n\t\tself.__size = None\n\t\tself.__modified_timestamp = None\n\n\tdef __str__(self):\n\t\treturn \"{}, {} ({} bytes)\".format(self.__dirpath, self.__filename, self.__size)\n\n\tdef getFileName(self):\n\t\treturn self.__filename\n\n\tdef getFileSize(self):\n\t\treturn self.__size\n\n\tdef getHash(self):\n\t\treturn self.__hash\n\n\tdef getFilePath(self):\n\t\treturn os.path.join(self.__dirpath, self.__filename)\n\n\tdef updateHashFromFile(self, file):\n\t\t# Check that file is identical, i.e. has same name and directory path\n\t\t# before updating hash values\n\n\t\t# Method returns True if file was updated. 
This functionality was\n\t\t# added to allow support for an optimisation to prune the saved file\n\t\t# list.\n\t\tif self.__filename == file.__filename:\n\t\t\tif self.__dirpath == file.__dirpath:\n\t\t\t\tself.__hash = file.__hash\n\t\t\t\tself.__size = file.__size\n\t\t\t\tself.__modified_timestamp = file.__modified_timestamp\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef calculateHash(self):\n\t\tfilepath = os.path.join(self.__dirpath, self.__filename)\n\n\t\t# Do not recalculate if hash already exists and is unchanged\n\t\t# in both size and last modified time\n\t\tif self.__hash is not None:\n\t\t\tif self.__modified_timestamp == os.path.getmtime(filepath):\n\t\t\t\tif self.__size == os.path.getsize(filepath):\n\t\t\t\t\treturn\n\n\t\tself.__size = os.path.getsize(filepath)\n\t\tself.__modified_timestamp = os.path.getmtime(filepath)\n\n\t\t# Source:\n\t\t# http://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python\n\t\tmd5 = hashlib.md5()\n\t\twith open(filepath,'rb') as f:\n\t\t\tfor chunk in iter(lambda: f.read(md5.block_size * 4), b''):\n\t\t\t\t md5.update(chunk)\n\t\tself.__hash = md5.hexdigest()\n\ndef pathMatchesPattern(path, patternList):\n\tfor pattern in patternList:\n\t\tif fnmatch.fnmatch(path, pattern):\n\t\t\treturn True\n\treturn False\n\ndef pathMatchesExclusionRules(name, dirpath, excludePatternList, includePatternList):\n\tif IGNORE_HIDDEN and name.startswith('.'):\n\t\treturn True\n\tpath = os.path.join(dirpath, name)\n\tif not os.path.exists(path):\n\t\treturn True\n\tif IGNORE_SYMLINKS and os.path.islink(path):\n\t\treturn True\n\tif pathMatchesPattern(path, excludePatternList):\n\t\treturn True\n\treturn not pathMatchesPattern(path, includePatternList)\n\ndef generateFileList(directoryList, excludePatternList, includePatternList):\n\tfileList = []\n\tfor directory in directoryList:\n\t\tfor root, dirs, files in os.walk(directory, topdown=True):\n\t\t\tfor dir in dirs:\n\t\t\t\t# User provided filters are not applied to directories\n\t\t\t\tif pathMatchesExclusionRules(dir, root, [], ['*']):\n\t\t\t\t\tdirs.remove(dir)\n\n\t\t\tfor file in files:\n\t\t\t\tif not pathMatchesExclusionRules(file, root, excludePatternList, includePatternList):\n\t\t\t\t\tfileList.append(File(file, root))\n\treturn fileList\n\ndef saveFileList(fileList):\n\ttry:\n\t\twith open(PERSIST_FILENAME, 'wb') as f:\n\t\t\tpickle.dump(fileList, f, pickle.HIGHEST_PROTOCOL)\n\texcept Exception as err:\n\t\tlog.error(\"Failed to save file list: {}\".format(err))\n\ndef loadSavedFileList():\n\tfileList = []\n\ttry:\n\t\twith open(PERSIST_FILENAME, 'rb') as f:\n\t\t\tfileList = pickle.load(f)\n\texcept FileNotFoundError:\n\t\tpass\n\texcept Exception as err:\n\t\tlog.error(\"Failed to load saved file list: {}\".format(err))\n\treturn fileList\n\ndef isFileSizeIdenticalInList(fileList):\n\tsize = None\n\tfor file in fileList:\n\t\tif size is None:\n\t\t\tsize = file.getFileSize()\n\t\telse:\n\t\t\tif size != file.getFileSize():\n\t\t\t\treturn False\n\treturn True\n\n# TODO: wonder if I can merge this with isFileSizeIdenticalInList...\ndef isFileNameIdenticalInList(fileList):\n\tfilename = None\n\tfor file in fileList:\n\t\tif filename is None:\n\t\t\tfilename = file.getFileName()\n\t\telse:\n\t\t\tif filename != file.getFileName():\n\t\t\t\treturn False\n\treturn True\n\ndef main():\n\tlog.basicConfig(level=log.DEBUG, format='%(levelname)s: %(message)s')\n\n\targs = parseArgs()\n\tlog.info(args)\n\n\tfileList = generateFileList(args.directoryList, 
args.excludePatternList,\n\t\targs.includePatternList)\n\n\tlog.info("{} files read".format(len(fileList)))\n\n\tif PERSIST_FILELIST:\n\t\tsavedFileList = loadSavedFileList()\n\t\t# Since each file is unique within a list and can only match a single\n\t\t# file in the other list -- we save some computation time by reducing\n\t\t# the size of the inner loop (savedFileList) with each successful\n\t\t# iteration of the outer loop (fileList).\n\t\tfor file in fileList:\n\t\t\tfor oldFile in savedFileList:\n\t\t\t\tif file.updateHashFromFile(oldFile):\n\t\t\t\t\tsavedFileList.remove(oldFile)\n\n\t# Organize the file list as a dictionary keyed by the files hash, thereby\n\t# collecting potential duplicate files together under the same key\n\tfileDictionary = {}\n\tfor file in fileList:\n\t\tfile.calculateHash()\n\t\tif file.getHash() in fileDictionary.keys():\n\t\t\tfileDictionary[file.getHash()].append(file)\n\t\telse:\n\t\t\tfileDictionary[file.getHash()] = [ file ]\n\n\t# Cache file list for future use to avoid having to recompute file hashes\n\tif PERSIST_FILELIST:\n\t\tsaveFileList(fileList)\n\n\t# Finally, iterate through the file dictionary and process user selected\n\t# actions on potential duplicate files\n\tfor hash, files in fileDictionary.items():\n\t\t# More than one file per hash indicates potential duplicate files\n\t\tif len(files) > 1:\n\t\t\tif args.list:\n\t\t\t\tprint(hash + ":")\n\t\t\t\tfor f in files:\n\t\t\t\t\tprint("\\t{}".format(f))\n\t\t\tif args.delete:\n\t\t\t\tif IDENTICAL_FILESIZE_CHECK and not isFileSizeIdenticalInList(files):\n\t\t\t\t\tlog.info("File size is not identical")\n\t\t\t\t\tcontinue\n\t\t\t\tif args.exact and not isFileNameIdenticalInList(files):\n\t\t\t\t\tlog.info("File name is not identical")\n\t\t\t\t\tcontinue\n\t\t\t\t# Delete all except the first file in list\n\t\t\t\tfor i in range(1,len(files)):\n\t\t\t\t\tlog.info("Deleting file {}".format(files[i].getFilePath()))\n\t\t\t\t\tos.remove(files[i].getFilePath())\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"shirishb/scripts","sub_path":"file_deduper.py","file_name":"file_deduper.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7509814200","text":"import aiohttp\nimport asyncio\nimport pandas as pd\nimport time\n\n\ndef flatten_json(y):\n out = {}\n\n def flatten(x, name=\"\"):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + \"_\")\n elif type(x) is list:\n i = 0\n for a in x:\n flatten(a, name + str(i) + \"_\")\n i += 1\n else:\n out[name[:-1]] = x\n\n flatten(y)\n return out\n\n\nasync def fetch_congress_trading_data():\n max_pages = 418\n url = \"https://bff.capitoltrades.com/trades?page={}&pageSize=96\"\n\n async with aiohttp.ClientSession() as session:\n tasks = []\n for i in range(max_pages + 1):\n task = asyncio.ensure_future(fetch(session, url.format(i)))\n tasks.append(task)\n\n responses = await asyncio.gather(*tasks)\n data = [flatten_json(item) for sublist in responses for item in sublist]\n\n return data\n\n\nasync def fetch(session, url):\n try:\n async with session.get(url) as response:\n json_response = await response.json()\n return json_response.get(\"data\", [])\n except Exception as e:\n print(e)\n return []\n\n\nasync def run() -> pd.DataFrame:\n return pd.DataFrame(await fetch_congress_trading_data())\n\n\nif __name__ == \"__main__\":\n t1 = time.time()\n\n df = asyncio.run(run())\n df.to_excel(r\"C:\\Users\\chris\\trade\\curr_pos\\notebooks\\fun\\congress_trading_data.xlsx\", index=False)\n print(df.head())\n\n t2 = time.time()\n print(f\"Time taken: {t2 - t1} seconds\")\n","repo_name":"cleeclee123/ETF_Info","sub_path":"notebooks/fun/congress.py","file_name":"congress.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27601440294","text":"\nimport gzip\nimport io\nimport glob\nfrom concurrent import futures\n\n\n# 你有个程序要执行CPU密集型工作,你想让他利用多核CPU的优势来运行的快一点。\n# concurrent.futures 库提供了一个 ProcessPoolExecutor 类,可被用来在一个单独的Python解释器中执行计算密集型函数。\ndef find_robots(filename):\n '''\n Find all of the hosts that access robots.txt in a single log file\n '''\n robots = set()\n with open(filename) as f:\n for line in f:\n # print(line)\n fields = line.split()\n if fields[6] == '/robots.txt':\n robots.add(fields[0])\n return robots\n\n\ndef find_all_robots(logdir):\n '''\n Find all hosts across and entire sequence of files\n '''\n files = glob.glob(logdir+'/*.log')\n all_robots = set()\n for robots in map(find_robots, files):\n all_robots.update(robots)\n return all_robots\n\n\ndef find_all_robots2(logdir):\n files = glob.glob(logdir+'/*.log')\n all_robots = set()\n with futures.ProcessPoolExecutor() as pool:\n for robots in pool.map(find_robots, files):\n all_robots.update(robots)\n return all_robots\n\n\nif __name__ == '__main__':\n with open('logs/20201201.log') as f:\n data = f.read()\n for day in range(4, 14):\n with open(f'logs/202012{day:02}.log', 'w') as f:\n for i in range(400000):\n f.write(data)\n\n import time\n t = time.time()\n robots = find_all_robots('logs')\n print(time.time() - t)\n for ipaddr in robots:\n print(ipaddr)\n\n t = time.time()\n robots = find_all_robots2('logs')\n print(time.time() - t)\n for ipaddr in robots:\n print(ipaddr)\n\n import os\n for day in range(4, 14):\n os.remove(f'logs/202012{day:02}.log')\n\n\n# cd .\\c12_concurrency\\p08_perform_simple_parallel_programming\n# python .\\process_pool.py\n","repo_name":"mofei952/cookbook","sub_path":"c12_concurrency/p08_perform_simple_parallel_programming/02_find_robots.py","file_name":"02_find_robots.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30118984437","text":"import copy\r\n\r\nimport torch\r\nimport numpy as np\r\n\r\nfrom .mask_lookup_generator import MaskLookupGenerator\r\nfrom .constants import Constants \r\n\r\n\r\nclass PrefixMaskingFuncVocabFactory():\r\n # Factory for masking function over a batch of prefixes\r\n def __init__(self, tokenizer, beam_size, prefix_matching_cache_path=None, vocab_file=None, device='cpu', beam_implementation='megatron'):\r\n self.tokenizer = tokenizer\r\n self.beam_size = beam_size\r\n self.device = device\r\n self.beam_implementation = beam_implementation\r\n self.use_vocab = False\r\n\r\n self._mask_generator = MaskLookupGenerator(tokenizer, prefix_matching_cache_path, vocab_file, device=device)\r\n self.use_vocab = True\r\n \r\n def __call__(self, prefix_batch, **kwargs):\r\n lookups = self._mask_generator.get_lookups(prefix_batch)\r\n masking_func = PrefixMaskingVocab(lookups, len(self.tokenizer), self.beam_size, device=self.device, beam_implementation=self.beam_implementation)\r\n # !!! Add additional properties\r\n masking_func.tokenizer = self.tokenizer\r\n masking_func.prefix_batch = prefix_batch\r\n masking_func.force_prefix = True if 'force_prefix' in kwargs and kwargs['force_prefix'] else False\r\n masking_func.remove_space_marker = True if 'remove_space_marker' in kwargs and kwargs['remove_space_marker'] else False\r\n return masking_func\r\n\r\nclass PrefixMaskingVocab():\r\n def __init__(self, lookups, tokenizer_vocab_len, beam_size, device, beam_implementation):\r\n self.lookups = lookups\r\n self.tokenizer_vocab_len = tokenizer_vocab_len\r\n self.beam_size = beam_size\r\n self.first_step = True\r\n self.device = device\r\n self.beam_implementation = beam_implementation\r\n\r\n def _get_lookup(self, sequence_id, token_id):\r\n lookup = self.lookups[self._get_valid_sequence_id(sequence_id)]\r\n return lookup.get(token_id, {})\r\n\r\n def _get_masks(self, input_seq_index, next_token_ids):\r\n if not any(self.lookups):\r\n return None\r\n\r\n if not self.first_step:\r\n if torch.is_tensor(input_seq_index):\r\n input_seq_index = input_seq_index.cpu().numpy()\r\n if torch.is_tensor(next_token_ids):\r\n next_token_ids = next_token_ids.flatten().cpu().numpy()\r\n \r\n self.lookups = [self._get_lookup(sid, tid) for sid, tid in zip(input_seq_index, next_token_ids)]\r\n\r\n masks = torch.ones((len(self.lookups), self.tokenizer_vocab_len), dtype=Constants.TORCH_MASK_DTYPE, device=self.device)\r\n for i, lookup in enumerate(self.lookups):\r\n if not lookup:\r\n continue\r\n prefix_heads = [k for k in lookup.keys() if k is not None] # token ids match the prefix.\r\n mask = lookup.get(None)\r\n if prefix_heads:\r\n prefix_heads = torch.LongTensor(prefix_heads).to(self.device)\r\n if mask is None:\r\n if len(prefix_heads):\r\n masks[i].fill_(0).index_fill_(0, prefix_heads, 1)\r\n else:\r\n if self.first_step:\r\n # !!! Get space marke\r\n space_marker = Constants.SPACE_MARKER\r\n # !!! Get space marker token id\r\n space_marker_token_id = self.tokenizer.convert_tokens_to_ids(Constants.SPACE_MARKER)\r\n # !!! Add force prefix\r\n if self.force_prefix:\r\n prefix = self.prefix_batch[i].strip()\r\n _prefix = space_marker + prefix\r\n _prefix_token_id = self.tokenizer.convert_tokens_to_ids(_prefix)\r\n if _prefix_token_id != self.tokenizer.unk_token_id:\r\n mask.fill_(0).index_fill_(0, torch.tensor([_prefix_token_id]), 1)\r\n # !!! 
Add remove space marker\r\n if self.remove_space_marker:\r\n mask.index_fill_(0, torch.tensor([space_marker_token_id]), 0)\r\n\r\n if len(prefix_heads):\r\n # !!! Only fill for the following conditions\r\n if not self.first_step or (not self.force_prefix and not self.remove_space_marker):\r\n mask.index_fill_(0, prefix_heads, 1)\r\n masks[i] = mask\r\n # import pdb; pdb.set_trace()\r\n return masks\r\n \r\n def _get_valid_sequence_id(self, sid):\r\n # Megatron and HF don't assign beam sequence ids the same way, so this an ugly fix\r\n # William Buchwalter TODO: find a cleaner way to handle this\r\n if self.beam_implementation=='megatron':\r\n return sid // self.beam_size\r\n return sid\r\n\r\n \r\n def __call__(self, input_seq_index=None, next_token_ids=None):\r\n '''\r\n :param input_seq_index, next_token_ids: [LongTensor(shape=(d,))], where d=batch_size x beam_size(num_seq per sample), i.e., total number of sequences.\r\n `input_seq_index` = the index of input sequences where the `next_token_ids` is associated with.\r\n `next_token_ids` = the selected next token ids.\r\n Note: the two inputs should have the same shape and order as `next_token_ids`. All index/ids starts with 0.\r\n\r\n Example: with batch size=2, beam_size=2, the input_seq_index should have length=2x2=4.\r\n Let's assumed input_seq_index = [1, 0, 3, 3], next_token_ids = [300, 800, 90, 50]. It means\r\n - for sample 1: seq-0, seq-1 are selected next candidate, with 300 (for seq-0), 800 (for seq-1) as the best next token_ids respectively.\r\n - for sample 2: seq-3 is chosen (seq-2 is dropped), with 90 and 50 as best next token ids.\r\n '''\r\n masks = self._get_masks(input_seq_index, next_token_ids)\r\n\r\n self.first_step = False\r\n return masks","repo_name":"tonyliangli/TWT","sub_path":"model/prefix_matching/prefix_matching/prefix_masking_vocab.py","file_name":"prefix_masking_vocab.py","file_ext":"py","file_size_in_byte":5912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"40212063499","text":"import numpy as np\n\nimport matplotlib.pyplot as plt\n\n\nfrom filterpy.kalman import KalmanFilter\nfrom scipy.linalg import block_diag\nfrom filterpy.common import Q_discrete_white_noise\nfrom numpy.random import randn\n\nclass PosSensor(object):\n def __init__(self, pos=(0, 0), vel=(0, 0), noise_std=1.):\n self.vel = vel\n self.noise_std = noise_std\n self.pos = [pos[0], pos[1]]\n \n def read(self):\n self.pos[0] += self.vel[0]\n self.pos[1] += self.vel[1]\n \n return [self.pos[0] + randn() * self.noise_std,\n self.pos[1] + randn() * self.noise_std]\n\n\nN = 30 # number of iterations\ndt = 1.0 # time step\nR_std = 0.35\nQ_std = 0.04\n\nM_TO_FT = 1 / 0.3048\n\nsensor = PosSensor((0, 0), (2, .5), noise_std=R_std)\nzs = np.array([sensor.read() for _ in range(N)])\n\ntracker = KalmanFilter(dim_x=4, dim_z=2)\n\ntracker.F = np.array([[1, 0, dt, 0],\n [0, 1, 0, dt],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\ntracker.H = np.array([[M_TO_FT, 0, 0, 0],\n [0, M_TO_FT, 0, 0]])\n\ntracker.R = np.eye(2) * R_std**2\nq = Q_discrete_white_noise(dim=2, dt=dt, var=Q_std**2)\ntracker.Q[0,0] = q[0,0]\ntracker.Q[1,1] = q[0,0]\ntracker.Q[2,2] = q[1,1]\ntracker.Q[3,3] = q[1,1]\ntracker.Q[0,2] = q[0,1]\ntracker.Q[2,0] = q[0,1]\ntracker.Q[1,3] = q[0,1]\ntracker.Q[3,1] = q[0,1]\n\ntracker.x = np.array([[0, 0, 0, 0]]).T\ntracker.P = np.eye(4) * 500.\n\nxs, ys = [], []\nxs1, ys1 = [], []\nprint(zs)\nfor z in zs:\n tracker.predict()\n tracker.update(z)\n m = np.dot(tracker.H, tracker.x)\n\n xs.append(m[0])\n ys.append(m[1])\n xs1.append(z[0])\n ys1.append(z[1])\n print(tracker, z, m)\n\nplt.plot(xs, ys)\nplt.plot(xs1, ys1)\nplt.show()\n\ninput()","repo_name":"haroldmei/MachineLearning","sub_path":"filters/2dtest.py","file_name":"2dtest.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73079620329","text":"from django.shortcuts import redirect, render\nfrom .forms import PostForm\nfrom .serializers import ChiterSerializer\nfrom rest_framework import generics\n\nfrom .models import Chiter\n\ndef chiters(request):\n\tchiter_list = Chiter.objects.order_by('-date')\n\tcontext = {'chiter_list': chiter_list}\n\treturn render(request, 'chiters/chiters.html', context)\n\ndef register(request):\n\tif request.method == \"POST\":\n\t\tform = PostForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tpost = form.save()\n\t\t\tpost.save()\n\t\t\treturn redirect('/chiters/message')\n\telse:\n\t\tform = PostForm()\n\treturn render(request, 'chiters/register.html', {'form': form})\n\ndef message(request):\n\treturn render(request, 'chiters/message.html')\n\n\n#API Views\nclass ChitersList(generics.ListCreateAPIView):\n\tqueryset = Chiter.objects.all()\n\tserializer_class = ChiterSerializer\n\n\nclass ChitersDetail(generics.RetrieveUpdateDestroyAPIView):\n\tqueryset = Chiter.objects.all()\n\tserializer_class = ChiterSerializer","repo_name":"chitcomhub/website","sub_path":"chiters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
+{"seq_id":"23401828478","text":"#! /usr/bin/python3\n###\n# import glob\n# import inspect\n# import importlib\n# import re\n# from multiprocessing import Pool\n# from mpl_toolkits.mplot3d import Axes3D\n# from matplotlib import cm\n# from matplotlib import rc\n# from astropy import time\n# from scipy import interpolate\n# from scipy import signal\n# from scipy import stats\n# import pyxsim\n# import soxs\n# from scipy import optimize\n###\nimport enum\nimport getopt\nimport os\nimport sys\nimport json\nimport math\nimport itertools\nfrom mpi4py import MPI \nimport yt\n# yt.enable_parallelism()\nfrom yt import derived_field\nimport numpy as np\n# from matplotlib import pyplot as pl\nfrom astropy import constants as C\nfrom astropy import units as U\n# import h5py\nRsun = C.R_sun.cgs.value\nMsun = C.M_sun.cgs.value\nday = 24*3600\nLsun = C.L_sun.cgs.value\nsigma_sb = C.sigma_sb.cgs.value\n###\nclass LineOfSight:\n _sl = None\n _lp = None\n los = None\n p1 = None\n r = None\n def __init__(self, ds, theta, phi, p1) -> None:\n # https://yt-project.org/doc/analyzing/objects.html\n # https://yt-project.org/doc/_modules/yt/data_objects/selection_objects/ray.html\n # θ = theta*np.pi/180.0\n # φ = phi*np.pi/180.\n # self.los = np.array([np.sin(θ)*np.cos(φ),np.sin(��)*np.sin(φ),np.cos(θ)])\n self.los = np.array([np.sin(theta)*np.cos(phi),np.sin(theta)*np.sin(phi),np.cos(theta)])\n self.p1 = p1\n # R = np.sqrt(np.sum(ds.domain_right_edge.value**2))\n R = np.max(ds.domain_right_edge.value)\n start = self.p1-R*self.los\n end = self.p1+R*self.los\n\n lp = np.array([start+dl*self.los for dl in np.linspace(0,2*R,1024)])\n pp = (lp[:,0]<=ds.domain_right_edge.value[0]) & \\\n (lp[:,1]<=ds.domain_right_edge.value[1]) & \\\n (lp[:,2]<=ds.domain_right_edge.value[2]) & \\\n (lp[:,0]>=ds.domain_left_edge.value[0]) & \\\n (lp[:,1]>=ds.domain_left_edge.value[1]) & \\\n (lp[:,2]>=ds.domain_left_edge.value[2])\n if not np.any(pp):\n self.r = []\n self.xyz = []\n start = lp[pp,:][0]\n end = lp[pp,:][-1]\n # ds.domain_right_edge.value, ds.domain_left_edge.value\n self._sl = ds.r[start:end]\n X = self._sl['flash','x'].value\n Y = self._sl['flash','y'].value\n Z = self._sl['flash','z'].value\n\n xyz = np.array([X-start[0],Y-start[1],Z-start[2]]).transpose()\n rxyz = np.linalg.norm(xyz,axis=1)\n\n self._lp = np.argsort(rxyz)\n\n self.r = rxyz[self._lp]\n self.xyz = np.array([X[self._lp],Y[self._lp],Z[self._lp]]).transpose()\n\n def sl(self,src):\n return np.array(self._sl[src].value)[self._lp]\n###\nclass Kappa:\n opal = None\n def __init__(self, opal_table):\n with open(opal_table, 'r') as fh:\n opal = json.load(fh)\n self.opal = opal\n def __call__(self, rho, temp):\n # k = 0.2*(1+0.7) # 0.2(1+X)cm^2/gr\n logRho = np.log10(rho)\n logT = np.log10(temp)\n logR = logRho - 3.0*logT + 18.0\n indR = np.argmin(np.abs(self.opal['logR']-logR))\n indT = np.argmin(np.abs(self.opal['logT']-logT))\n # print(f\" Rho={rho:.2e} T={temp:.2e} kappa={10.**self.opal['table'][indT][indR]:.2e} logR={logR:.2e} logRho={logRho:.2e} logT={logT:.2e} logKappa={self.opal['table'][indT][indR]:.2e}\")\n return 10.**self.opal['table'][indT][indR]\n###\ndef already_exist(fn, odir, no):\n return False\n # ofn = os.path.join(odir, f'TRL_eff_{no:04}.npz')\n # if os.path.exists(ofn):\n # return True\n # return False\n###\ndef p1(alpha,theta,phi):\n p1 = np.array([np.cos(alpha)*np.cos(theta)*np.cos(phi) - np.sin(alpha)*np.sin(phi),\n np.sin(alpha)*np.cos(phi) + np.cos(alpha)*np.cos(theta)*np.sin(phi),\n -np.cos(alpha)*np.sin(theta)]) #.reshape((3,1))\n 
return p1\n###\ndef calc_fk(param):\n ds = yt.load(param['fn'])\n\n all_obs = list()\n\n param['theta'] = np.array(param['theta'])\n param['phi'] = np.array(param['phi'])\n \n param['R'] = np.linspace(3.*Rsun, np.min(ds.domain_right_edge.value), 100)\n param['alpha_rad'] = np.array([0.,np.pi])\n \n ni = len(param['theta'])\n nj = len(param['phi'])\n nk = len(param['R'])*len(param['alpha_rad'])\n\n Teff = np.zeros((ni,nj,nk))*np.nan\n Reff = np.zeros((ni,nj,nk))*np.nan\n Leff = np.zeros((ni,nj,nk))*np.nan\n XYZeff = np.zeros((ni,nj,nk,3))*np.nan\n p1_list = np.zeros((ni,nj,nk,3))*np.nan\n vel_eff = np.zeros((ni,nj,nk,3))*np.nan\n\n time = np.array([ds.current_time.to('d').value,])\n\n theta_rad = param['theta']*np.pi/180.\n phi_rad = param['phi']*np.pi/180.\n\n dtheta = np.pi/len(param['alpha_rad'])\n dphi = np.pi/len(param['alpha_rad']) \n\n # dtheta_list = np.diff(theta_rad)\n # dtheta_list = np.append(dtheta_list[0],dtheta_list) if theta_rad.size>1 else np.array([np.pi/180.,])\n\n # dphi_list = np.diff(phi_rad)\n # dphi_list = np.append(dphi_list[0],dphi_list) if phi_rad.size>1 else np.array([np.pi/180.,])\n\n # opal_high = Kappa(param['Opal_Table']['high'])\n # opal_low = Kappa(param['Opal_Table']['low'])\n\n # opacity_units = np.array(['COND_VAR [cm^2/s]', 'OPAC_VAR[cm^2/gr]', 'trans_opac [1/cm]', 'kappa=c/3/COND_VAR[1/cm]', 'opac=c/3/COND_VAR/DENS_VAR[cm^2/gr]', 'RadTrans.F90 line 305'], dtype='S40')\n # for i,theta,dtheta in zip(range(ni),theta_rad,dtheta_list):\n # for j,phi,dphi in zip(range(nj),phi_rad,dphi_list):\n fix_kappa = param['kappa']\n for i,theta in zip(range(ni),theta_rad):\n for j,phi in zip(range(nj),phi_rad):\n k = -1\n for rr in param['R']:\n for alpha in param['alpha_rad']:\n k = k + 1\n p_shift = rr*p1(alpha,theta,phi)\n ray = LineOfSight(ds,theta,phi,p_shift)\n\n if ray.r.size<2:\n print('ray size is to small {} at: theta {} phi {} R {} alpha {}'.format(ray.r.size,theta*180./np.pi,phi*180./np.pi,rr/Rsun,alpha*180./np.pi))\n continue\n\n temp = ray.sl( ('flash', 'temp') )\n dens = ray.sl( ('flash', 'dens') )\n # opac = ray.sl( ('flash', 'opac') ) # in units of cm^2/gr\n # size_of_opac = len(dens)\n # opac = np.zeros((size_of_opac,))\n # for rho,T,ind_op in zip(dens,temp,range(size_of_opac)):\n # if T < param['Opal_Table']['T']:\n # opac[ind_op] = opal_low(rho, T)\n # else:\n # opac[ind_op] = opal_high(rho, T)\n # kappa = opac*dens # in units of 1/cm\n\n # ind_op_de = calc_optical_depth(kappa, ray.r)\n ind_op_de = calc_optical_depth(fix_kappa*dens, ray.r)\n if np.isnan(ind_op_de):\n print('Opacity not reach value of 2/3 at: theta {} phi {} R {} alpha {}'.format(theta*180./np.pi,phi*180./np.pi,rr/Rsun,alpha*180./np.pi))\n continue\n\n p1_list[i,j,k,:]=p_shift\n Teff[i,j,k] = temp[ind_op_de]\n Reff[i,j,k] = np.sqrt(np.sum(ray.xyz[ind_op_de,:]**2))\n XYZeff[i,j,k,:] = ray.xyz[ind_op_de,:]\n vel_eff[i,j,k,0] = ray.sl( ('flash', 'velx') )[ind_op_de]\n vel_eff[i,j,k,1] = ray.sl( ('flash', 'vely') )[ind_op_de]\n vel_eff[i,j,k,2] = ray.sl( ('flash', 'velz') )[ind_op_de]\n theta_xyz = np.arccos(ray.xyz[ind_op_de,2]/Reff[i,j,k])\n cos_theta_xyz = np.dot(ray.xyz[ind_op_de,:],ray.los)/(np.linalg.norm(ray.los)*np.linalg.norm(ray.xyz[ind_op_de,:]))\n if cos_theta_xyz <= 0:\n Leff[i,j,k] = 0.\n else:\n Leff[i,j,k] = sigma_sb * Teff[i,j,k]**4 * Reff[i,j,k]**2 * np.sin(theta_xyz)*dtheta*dphi * cos_theta_xyz\n\n ofn = os.path.join(param['chk']['odir'], 'TRL_eff_{:04}_fix_kappa_{}.npz'.format(param['no.'], fix_kappa))\n np.savez(ofn, time=time, Teff=Teff, Reff=Reff, Leff=Leff, 
XYZeff=XYZeff, p1=p1_list, vel_eff=vel_eff, theta=param['theta'], phi=param['phi']) \n return 'Write {}'.format(ofn)\n###\ndef calc(param):\n for param['kappa'] in param['fix_kappa_list']:\n calc_fk(param)\n###\ndef calc_optical_depth(kappa, r):\n dr = np.diff(r)\n dr = np.append(dr[0],dr)\n opt_dep = kappa*dr\n opacity = np.cumsum(opt_dep[::-1])\n ind = np.sum(opacity<=(2./3.))\n if ind>=opacity.size :\n return np.nan\n return opacity.size-1-ind\n\n # if ind>=opacity.size :\n # ind=opacity.size-1\n # return opacity[::-1], opacity.size-1-ind\n###\ndef main():\n param = dict()\n param['chk'] = dict()\n param['chk']['dir'] = '/home/amirm/code/flash_disk_v4.6.2/obj-res/akashi/'\n param['chk']['odir'] = '/home/amirm/code/flash_disk_v4.6.2/obj-res/akashi/'\n param['chk']['fn'] = 'PN1_hdf5_chk_{:04}'\n param['no.'] = 64 # 61\n param['fn'] = os.path.join(param['chk']['dir'],param['chk']['fn'].format(param['no.']))\n\n param['Opal_Table'] = dict()\n param['Opal_Table']['low'] = '/home/amirm/code/flash_disk_v4.6.2/obj-res/akashi/lowT_fa05_gs98_z0.02_x0.7.data.json'\n param['Opal_Table']['high'] = '/home/amirm/code/flash_disk_v4.6.2/obj-res/akashi/gs98_z0.02_x0.7.data.json'\n param['Opal_Table']['T'] = 6.5e3\n\n param['theta'] = list(range(0,91,10)) # [30., 70.,]\n param['phi'] = [0.,]\n\n param['fix_kappa_list'] = [0.06, 0.3, 1.5]\n\n os.chdir(param['chk']['dir'])\n calc(param)\n###\nif __name__ == \"__main__\":\n main()\n print('done {}'.format(sys.argv[0]))\n###\n###\n","repo_name":"AmirGoshenMichaelis/fast_blue_optical_transients","sub_path":"chk_to_npz/V_obs_calc_resample.py","file_name":"V_obs_calc_resample.py","file_ext":"py","file_size_in_byte":9731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1241952762","text":"# Preprocess.py\n\nimport cv2\nimport numpy as np\nimport math\n\n# module level variables ##########################################################################\nGAUSSIAN_SMOOTH_FILTER_SIZE = (5, 5)\nADAPTIVE_THRESH_BLOCK_SIZE = 19\nADAPTIVE_THRESH_WEIGHT = 9\n\n###################################################################################################\ndef preprocess(imgOriginal):\n imgGrayscale = extractValue(imgOriginal)\n\n imgMaxContrastGrayscale = maximizeContrast(imgGrayscale)\n\n height, width = imgGrayscale.shape\n\n imgBlurred = np.zeros((height, width, 1), np.uint8)\n\n imgBlurred = cv2.GaussianBlur(imgMaxContrastGrayscale, GAUSSIAN_SMOOTH_FILTER_SIZE, 0)\n\n #HSV#########################################################################################\n img_hsv= cv2.cvtColor(imgOriginal,cv2.COLOR_BGR2HSV)\n\n\n # lower mask (0-10)\n lower_red = np.array([0,50,50])\n upper_red = np.array([10,255,255])\n mask0 = cv2.inRange(img_hsv, lower_red, upper_red)\n\n # upper mask (170-180)\n lower_red = np.array([170,50,50])\n upper_red = np.array([180,255,255])\n mask1 = cv2.inRange(img_hsv, lower_red, upper_red)\n\n # join my masks\n mask = mask0+mask1\n\n # set my output img to zero everywhere except my mask\n output_img = imgOriginal.copy()\n output_img[np.where(mask==0)] = 0\n\n # or your HSV image, which I *believe* is what you want\n output_hsv = img_hsv.copy()\n output_hsv[np.where(mask==0)] = 0\n ####################################################\n\n imgThresh= cv2.adaptiveThreshold(mask, 255.0, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, ADAPTIVE_THRESH_BLOCK_SIZE, ADAPTIVE_THRESH_WEIGHT)\n\n return imgGrayscale, imgThresh\n# end function\n\n###################################################################################################\ndef extractValue(imgOriginal):\n height, width, numChannels = imgOriginal.shape\n\n imgHSV = np.zeros((height, width, 3), np.uint8)\n\n imgHSV = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)\n\n imgHue, imgSaturation, imgValue = cv2.split(imgHSV)\n\n return imgValue\n# end function\n\n###################################################################################################\ndef maximizeContrast(imgGrayscale):\n\n height, width = imgGrayscale.shape\n\n imgTopHat = np.zeros((height, width, 1), np.uint8)\n imgBlackHat = np.zeros((height, width, 1), np.uint8)\n\n structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n\n imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement)\n imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement)\n\n imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)\n imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)\n\n return imgGrayscalePlusTopHatMinusBlackHat\n# end function\n","repo_name":"mu7ammad-3li/learning_opencv","sub_path":"Text_recognition/Preprocess.py","file_name":"Preprocess.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"4867946370","text":"import numpy as np\nN_ROWS = 5\nN_COLS = 6\nN_STATES = N_ROWS*N_COLS \nGOAL = 28\nACTIONS = [\"S\", \"U\", \"D\", \"R\" , \"L\"]\nM_ACTIONS = {\"U\": np.array([0, 1]), \"D\": np.array([0, -1]), \"R\": np.array([1, 0]), \"L\": np.array([-1, 0])}\n# movements = [(0, 1), (0, -1), (1, 0), (-1, 0)]\nT = 15\nN_ACTIONS = len(ACTIONS)\n\nwalls = [(1,2),(2,1),(7,8),(8,7),(13,14),(14, 13),(9,10),(10,9),(15,16),(16,15),(10,16),(16,10),(11,17),(17, 11),(19,25),(25,19),(20,26),(26,20),(21,27),\n (27,21),(27,28),(28,27),(22,28),(28,22)]\n\n\ndef help_printer(input):\n V_mat = np.zeros((N_ROWS,N_COLS))\n for row in range(N_ROWS):\n for col in range(N_COLS):\n print(input[row*N_COLS+col], end=' ')\n V_mat[row,col] = input[row*N_COLS+col]\n print()\n return V_mat\n\ndef is_goal(state):\n return state == GOAL\n\ndef move(row, col, action):\n old_row = row\n old_col = col\n prev_state = row * N_COLS + col\n trans_prob = 0.2\n\n if action == \"S\":\n state = trans_prob, row * N_COLS + col\n elif action == \"R\":\n col = min(N_COLS-1, col+1) \n elif action == \"L\":\n col = max(0, col-1)\n elif action == \"U\":\n row = max(0,row-1)\n elif action == \"D\":\n row = min(N_ROWS-1, row+1)\n\n state = row * N_COLS + col\n reward = 1 if state == GOAL else 0\n if (prev_state, state) in walls:\n state = old_row * N_COLS + old_col\n trans_prob = 0.2\n reward = 0\n\n\n\n return reward, trans_prob, state\n\n\n\ndef build(m_path):\n # print(m_path)\n # minotaur_coord = [None] * len(m_path)\n # minotaur_walk = [None] * len(m_path)\n # minotaur_walk[0] = 4 * N_COLS + 4\n # minotaur_coord[0] = [4, 4]\n # for i in range(1, len(m_path)):\n # minotaur_walk[i] = move(minotaur_coord[i][0], minotaur_walk) # minotaur_walk[i-1] + M_ACTIONS[m_path[i]]\n # print(minotaur_walk)\n P = {state : {action: [] for action in ACTIONS} for state in range(N_STATES)}\n for row in range(N_ROWS):\n for col in range(N_COLS):\n state = row * N_COLS + col\n for action in ACTIONS:\n l = P[state][action]\n reward, trans_prob, s_prim = move(row, col, action)\n # print(f\"{state} -> {s_prim}, {action}: {reward}\")\n l.append((trans_prob, s_prim, reward, is_goal(s_prim)))\n\n return P\n\ndef policy_iteration(P, policy):\n iter = 0\n old_policy = policy.copy()\n while iter < 15:\n print(\"\\n iter: {}\".format(iter))\n V = compute_policy(P,policy) \n policy = update_policy(V, P, policy)\n old_policy = policy\n if np.all(policy == old_policy):\n break\n iter +=1\n return policy, V\n\ndef compute_policy(P,policy):\n diff = 99999\n eps = 0.01\n V = np.zeros((N_STATES,))\n x = 0\n while True:\n delta = 0\n #V_current = V.copy()\n for state in range(N_STATES):\n v = 0\n #action = policy[state]\n for action in ACTIONS:\n trans_prob, s_prim, reward, _ = P[state][action][0]\n # print(trans_prob, s_prim, reward)\n v += trans_prob * (reward + V[s_prim])\n # _ = help_printer(V)\n delta = max(delta, np.abs(v - V[state]))\n print(delta)\n V[state] = v\n if delta < eps:\n break\n x+=1\n return V\n\ndef update_policy(V, P, policy):\n # policy = list(np.arange(N_STATES))\n pi = np.zeros((N_STATES,))\n unchanged = True\n while unchanged:\n for state in range(N_STATES):\n # pi = 0\n # q = np.zeros((N_ACTIONS,))\n value = -10\n best_action = \"\"\n for a, action in enumerate(ACTIONS):\n trans_prob, s_prim, _, _ = P[state][action][0]\n q = trans_prob * V[s_prim]\n if q > value:\n value = q\n best_action = action\n # print(policy[state])\n p, s_new, _, _ = P[state][policy[state]][0]\n if value > p * V[s_new]:\n print(\"Changing the policy.\")\n 
policy[state] = best_action\n unchanged = False\n # r += p * V[s_new]\n # q[a] = sum([trans_prob * (reward + V[s_prim]) for trans_prob, s_prim, reward, _ in P[state][action]])\n # PI[state] = pi\n # idx = np.argmax(PI)\n # policy[state] = ACTIONS[idx]\n print(\"Done!\")\n return policy\n\n\nminotaur_path = np.random.choice([\"U\", \"D\", \"R\", \"L\"], size=T)\n\nP = build(minotaur_path)\npolicy = np.random.choice(ACTIONS, size=N_STATES)\nnew_policy, V = policy_iteration(P,policy)\nimport matplotlib.pyplot as plt\nprint(policy)\n# V_mat = help_printer(V)[::-1]\n# norm1 = V_mat / np.linalg.norm(V_mat)\n# # print(walls)\n# plt.pcolormesh(norm1)\n# plt.colorbar()\n# plt.show()\n","repo_name":"rsiwerz/el2805","sub_path":"01/task1_policy_iter.py","file_name":"task1_policy_iter.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2136461192","text":"# 프로그래머스 섬 연결하기(Level 3)\nfrom collections import deque\n\ndef solution(n, costs):\n\tanswer = 0\n\tcosts.sort(key = lambda x:x[2])\n\tvisited = set([costs[0][0]])\n\twhile len(visited) != n:\n\t\tfor i, cost in enumerate(costs):\n\t\t\tif cost[0] in visited and cost[1] in visited:\n\t\t\t\tcontinue\n\t\t\tif cost[0] in visited or cost[1] in visited:\n\t\t\t\tvisited.update([cost[0], cost[1]])\n\t\t\t\tanswer += cost[2]\n\t\t\t\tcosts[i] = [-1, -1, -1]\n\t\t\t\tbreak\n\treturn answer\nprint(solution(4, [[0,1,1],[0,2,2],[1,2,5],[1,3,1],[2,3,8]]))","repo_name":"woosteelz/AlgorithmPrac","sub_path":"Programmers/PG_connectLand.py","file_name":"PG_connectLand.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}