diff --git "a/4753.jsonl" "b/4753.jsonl" new file mode 100644--- /dev/null +++ "b/4753.jsonl" @@ -0,0 +1,666 @@ +{"seq_id":"404422817","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 15 15:50:48 2016\n\n@author: yanhan\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom scipy.stats import f_oneway\nimport re,string\nimport pypinyin\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nimport cPickle as pickle\n# from ggplot import *\nimport seaborn as sns\nfrom collections import OrderedDict\n\n#import sys\n#reload(sys)\n#sys.setdefaultencoding('utf8')\n\nsns.set_context(\"paper\")\nsns.set(font_scale = 1.5)\nsns.set_style('whitegrid')\nsns.set_palette(sns.light_palette(\"black\", 3))\n#sns.palplot(sns.color_palette())\n\nplt.rcParams['font.family'] = ['SimHei'] #'Microsoft Jhenghei', 'BiauKai\nplt.rcParams['font.sans-serif'] = ['SimHei'] #'BiauKai'\nplt.rcParams['axes.unicode_minus'] = False\nsns.axes_style()\n\npd.set_option('display.encoding', 'utf8')\n\ndef fun_mean(x):\n return(np.around(np.mean(x),2))\ndef fun_std(x):\n return(np.around(np.std(x),2))\ndef fun_se(x):\n return(np.around(np.std(x)/np.sqrt(len(x)),2))\n\nsubjects = pd.read_excel('資料蒐集表(Data Collection Sheet).xlsx')\n\nerror_dict = {0:u'正確',\n 1:u'右部件的字音',\n 2:u'含相同右部件的鄰居字的讀音',\n 3:u'左部件的字音',\n 4:u'含相同左部件的鄰居字的讀音',\n 5:u'與目標字常常同時出現的字的讀音',\n 6:u'形近字的讀音',\n 7:u'其他',\n 8:u'無反應',\n 9:u'右部件形近字的讀音',\n 10:u'含有右部件形近字的字的讀音'}\nerror_list = pd.Series([0,1,2,9,10,3,4,5,6,7,8]).map(lambda x:error_dict[x])\n\n#sub_group = {1:'Elementary',\n# 2:'Intermediate',\n# 3:'Advanced'}\n \n#sub_diff = {'diff':'Low Frequency',\n# 'easy':'High Frequency'}\n \n#sub_regu = {'regu':'Regular',\n# 'irre':'Irregular'}\n \n#sub_pv = {'low':'Low',\n# 'medium':'Medium',\n# 'high':'High'}\n \nsub_group = OrderedDict([(1,'Elementary'),\n (2,'Intermediate'),\n (3,'Advanced')]) \n \nsub_diff = OrderedDict([('diff','Low Frequency'),\n ('easy','High Frequency')])\n \nsub_regu = OrderedDict([('irre','Irregular'),\n ('regu','Regular')]) \n \nsub_pv = OrderedDict([('low','Low'),\n ('medium','Medium'),\n ('high','High')])\n \nsub_level = ['Low Know Rate','Medium Know Rate', 'High Know Rate']\n \n","sub_path":"modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"477041503","text":"import threading\n\nTHREADS = 2\nMAX_COUNT = 1000000\n\ncounter = 0\n\n\ndef cuenta():\n global counter\n\n for i in range(int(MAX_COUNT/THREADS)):\n counter += 1\n\n\nthreads = []\n\nfor i in range(THREADS):\n t = threading.Thread(target=cuenta)\n threads.append(t)\n t.start()\n\nfor t in threads:\n t.join()\n\nprint(f\"Valor del contador: {counter}\")\n\n","sub_path":"contadorConcurrente.py","file_name":"contadorConcurrente.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"347191658","text":"\"\"\"\nPython makes performing file I/O simple. 
Take a look\nat how to read and write to files here:\n\nhttps://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files\n\"\"\"\n\n# Open up the \"foo.txt\" file (which already exists) for reading\n# Print all the contents of the file, then close the file\n# Note: pay close attention to your current directory when trying to open \"foo.txt\"\n\n# YOUR CODE HERE\ndef print_text(txt):\n file = open(txt, \"r\")\n content = file.read()\n file.close()\n print(content)\n\nprint_text(\"foo.txt\")\n\n# Open up a file called \"bar.txt\" (which doesn't exist yet) for\n# writing. Write three lines of arbitrary content to that file,\n# then close the file. Open up \"bar.txt\" and inspect it to make\n# sure that it contains what you expect it to contain\n\n# YOUR CODE HERE\ndef write_text(txt):\n file2 = open(txt, \"w\")\n file2.write(\"Contrary to popular belief,\\nLorem Ipsum is not\\nsimply random text.\")\n file2.close()\n\nwrite_text(\"bar.txt\")\n","sub_path":"src/13_file_io.py","file_name":"13_file_io.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"217632001","text":"# problem: ordina_x, Romeo Rizzi Mar 2015\n\nimport sys\n\nnBalls = 0\nnPesate = 0\nmaxPesate = 0\nsubtask = 0\nrseed = 0\noutfile = None\n\norder = None\n\nRAND_MAX = 0x7fffffff\n\n\ndef rand_cp():\n global rseed\n rseed = (rseed * 1103515245 + 12345) & RAND_MAX\n return rseed\n\n\ndef generaPerm_random_uniform(perm, n):\n for i in range(n):\n perm[i] = i\n for i in reversed(range(1, n)):\n j = rand_cp() % i\n perm[i], perm[j] = perm[j], perm[i]\n\n\ndef bigliaIntermedia(bigliaA, bigliaB, bigliaC):\n global nPesate\n\n nPesate += 1\n\n if order[bigliaA] >= order[bigliaB]:\n if order[bigliaB] >= order[bigliaC]:\n return bigliaB\n if order[bigliaA] <= order[bigliaC]:\n return bigliaA\n return bigliaC\n else:\n if order[bigliaB] <= order[bigliaC]:\n return bigliaB\n if order[bigliaA] >= order[bigliaC]:\n return bigliaA\n return bigliaC\n\n\ndef consegnaBiglieInOrdine(biglia_in_pos):\n global nBalls, order, outfile, nPesate, maxPesate\n well_ordered = True\n for i in range(nBalls):\n if (order[biglia_in_pos[i]] != i) and (order[biglia_in_pos[i]] != nBalls - i - 1):\n well_ordered = False\n\n print(\"%d %d %d\" % (well_ordered, nPesate, maxPesate), file=outfile)\n # only for debugging\n # for i in range(nBalls):\n # print(file, \"%ld \", order[i])\n # print(file, \"\\n\")\n # for i in range(nBalls):\n # print(file, \"%ld \", biglia_in_pos[i])\n sys.exit(0)\n\n\ndef ottieni_num_balls():\n global nBalls, rseed, outfile, maxPesate, order\n\n infile = open(\"input.txt\", \"r\")\n # infile = sys,stdin;\n\n (nBalls, subtask, seed) = [int(x.strip()) for x in infile.read().split()]\n infile.close()\n\n LOG_UP = 1\n guy = 2\n while guy < nBalls:\n LOG_UP += 1\n guy *= 2\n\n order = [0] * nBalls\n\n rseed = seed\n generaPerm_random_uniform(order, nBalls) # genera permutazione\n\n outfile = open(\"output.txt\", \"w\")\n # outfile = sys.stdout\n\n maxPesate = 1000 * nBalls * nBalls\n\n if subtask == 1:\n for i in range(nBalls):\n order[i] = i\n elif subtask == 2:\n for i in range(nBalls):\n order[i] = nBalls - i - 1\n elif subtask == 5:\n maxPesate = nBalls * (nBalls - 1) / 2\n elif subtask == 6:\n maxPesate = 3 * nBalls * LOG_UP\n elif subtask == 7:\n maxPesate = nBalls + nBalls * (LOG_UP)\n\n return 
nBalls\n","sub_path":"mediana_x/sol/grader.py","file_name":"grader.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"2793020","text":"# -*- coding: utf-8 -*-\n\nimport ripcord\n\nclass Fixtures(ripcord.Client):\n def __init__(self, **kwargs):\n super(Fixtures, self).__init__(**kwargs)\n\n self.baseurl = 'http://httpbin.org/'\n\n self.add_extra_params({\n 'token': 'a-random-token',\n 'foo': 'oof',\n 'bar': 'rab',\n 'merp': 'prem',\n 'flakes': 'sekalf'\n })\n\n def simulate_status_code(self, status_code):\n self.namespace = 'status'\n return self.get(str(status_code))","sub_path":"tests/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"61368521","text":"\"\"\"\nThis code is designed to convert a fits file trace into a normalized reflectance spectrum using a solar analog.\nDate:05/21/19\n\"\"\"\n\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport numpy as np\nimport csv\nimport argparse\nimport re\n\n\ndef read_mean_tax():\n mean_spec_file = 'mean_spec/busdemeo-meanspectra.csv'\n with open(mean_spec_file, newline='') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n spec_dict = {}\n for row in csv_reader:\n if line_count == 1:\n header = row\n for head in header:\n spec_dict[head] = []\n line_count += 1\n elif line_count > 1:\n for i, r in enumerate(row):\n spec_dict[header[i]].append(float(r))\n line_count += 1\n else:\n line_count += 1\n return spec_dict\n\n\ndef stand_plot(ax, stand_tax):\n spec_dict = read_mean_tax()\n lam = np.array(spec_dict['Wavelength'])\n lam *= 10000\n for tax in stand_tax:\n tax = tax.lower().capitalize()\n tax_mean = tax+'_Mean'\n tax_sig = tax+'_Sigma'\n try:\n yyy = np.array(spec_dict[tax_mean])\n yyy_error = np.array(spec_dict[tax_sig])\n except KeyError:\n print(\"No such taxonomy as {}.\".format(tax))\n continue\n y_err_upper = yyy + yyy_error\n y_err_lower = yyy - yyy_error\n\n test = [j for j, x in enumerate(lam) if 3500 < x < 10500]\n\n color = next(ax._get_lines.prop_cycler)['color']\n ax.plot(lam[test], y_err_upper[test], linestyle=\":\", color=color, alpha=.5)\n ax.plot(lam[test], y_err_lower[test], linestyle=\":\", color=color, alpha=.5)\n ax.plot(lam[test], yyy[test], color=color, label=tax_mean, alpha=.5)\n\n\ndef smooth(x, window_len=11, window='hanning'):\n \"\"\"smooth the data using a window with requested size.\n\n This method is based on the convolution of a scaled window with the signal.\n The signal is prepared by introducing reflected copies of the signal\n (with the window size) in both ends so that transient parts are minimized\n in the begining and end part of the output signal.\n\n input:\n x: the input signal\n window_len: the dimension of the smoothing window; should be an odd integer\n window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n flat window will produce a moving average smoothing.\n\n output:\n the smoothed signal\n\n example:\n\n t=linspace(-2,2,0.1)\n x=sin(t)+randn(len(t))*0.1\n y=smooth(x)\n\n see also:\n\n numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n scipy.signal.lfilter\n\n TODO: the window parameter could be the window itself if an array instead of a string\n NOTE: length(output) != 
length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n \"\"\"\n\n if len(x) < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if window_len % 2 != 0:\n window_len += 1\n\n if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n s = np.r_[x[window_len-1:0:-1], x, x[-2:-window_len-1:-1]]\n # print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.'+window+'(window_len)')\n\n y = np.convolve(w/w.sum(), s, mode='valid')\n\n return y[(window_len // 2 - 1):-(window_len // 2)]\n\n\ndef pull_data_from_spectrum(spectra):\n try:\n hdul = fits.open(spectra)\n except FileNotFoundError:\n print(\"Cannot find file {}\".format(spectra))\n return None, None, None\n\n data = hdul[0].data\n hdr = hdul[0].header\n\n yyy = data[0][0]\n w = WCS(hdr, naxis=1, relax=False, fix=False)\n lam = w.wcs_pix2world(np.arange(len(yyy)), 0)[0]\n\n return lam, yyy, hdr\n\n\ndef pull_data_from_text(spectra):\n f = open(spectra)\n lines = f.readlines()\n xxx = []\n yyy = []\n print(len(lines))\n for line in lines:\n try:\n chunks = line.split(' ')\n chunks = list(filter(None, chunks))\n xxx.append(float(chunks[0])*10000)\n yyy.append(float(chunks[1])+.85)\n except ValueError:\n continue\n return xxx, yyy\n\n\ndef spectrum_plot(spectra, ax, data_set, analog=None, offset=0):\n windows = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']\n spec_x, spec_y, spec_header = pull_data_from_spectrum(spectra)\n if spec_y is None:\n return ax, None, None\n\n box = 100\n\n if analog:\n analog_x, analog_y, analog_header = pull_data_from_spectrum(analog)\n if analog_y is None:\n spec_y = [x / (10 ** 20) for x in spec_y]\n yyy = spec_y\n analog = None\n else:\n spec_y_sm = smooth(spec_y, box, windows[1])\n analog_y_sm = smooth(analog_y, box, windows[1])\n yyy = [s / a for s, a in zip(spec_y_sm, analog_y_sm)]\n else:\n spec_y = [x / (10 ** 20) for x in spec_y]\n yyy = spec_y\n\n if not data_set:\n if analog:\n data_set = \"{} -- {} -- {}\".format(spec_header['OBJECT'], analog_header['OBJECT'], spec_header['DAY-OBS'])\n else:\n data_set = \"{} -- {}\".format(spec_header['OBJECT'], spec_header['DAY-OBS'])\n elif data_set.upper() == 'NONE':\n data_set = ''\n\n xxx = spec_x[0:len(yyy)]\n\n smoothy = np.array(yyy)\n\n test = [j for j, x in enumerate(xxx) if 4000 < x < 10000]\n # test = [j for j, x in enumerate(xxx) if 6000 < x < 7000]\n\n find_g = [j for j, x in enumerate(xxx) if 5400 < x < 5600]\n smoothy = smoothy / np.mean(smoothy[find_g])\n\n offy = [y + 0.2*offset for y in smoothy]\n ax.plot(xxx[test], smoothy[test], label=data_set)\n return ax, offy, xxx\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--outpath\", help=\"Output path for plots\", type=str, default='')\n parser.add_argument(\"--path\", help=\"base path spectra\", type=str, default='')\n parser.add_argument(\"--title\", help=\"Title for Plot\", type=str, default='Normalized Spectra')\n args = parser.parse_args()\n path = args.path\n outpath = args.outpath\n title = args.title\n trace = 'test'\n reflec = True\n if path and path[-1] != '/':\n path = path + '/'\n if outpath and outpath[-1] != '/':\n outpath = outpath + '/'\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, title=title)\n\n print(\"List the taxonomic standards to plot. 
(comma separated format. => X, C, B) \")\n print(\"Possible standards include: A,B,C,Cb,Cg,Cgh,Ch,D,K,L,O,Q,R,S,Sa,Sq,Sr,Sv,T,V,X,Xc,Xe,Xk,Xn,None\")\n stand_tax = input(\"Taxonomic Standards:\")\n\n if stand_tax and 'NONE' not in stand_tax.upper():\n stand_tax = list(filter(None, re.split(r',|\\s|;|\\.|/|-', stand_tax)))\n stand_plot(ax, stand_tax)\n\n while trace:\n print(\"=========================================================================\")\n print(\"Input the path to the 1D merged asteroid trace (Leave blank to skip).\")\n print(\"If using the FLOYDS pipeline, this will be of the form 'trim_ntt*_merge_*_e.fits or ntt*_merge_*2df_ex.fits\")\n trace = input(\"Path to asteroid trace:\")\n\n if trace:\n print(\"=========================================================================\")\n print(\"Input the path to the 1D merged solar analog trace to be removed from this spectrum (Leave blank to skip).\")\n print(\"If using the FLOYDS pipeline, this will be of the form 'trim_ntt*_merge_*_e.fits or ntt*_merge_*2df_ex.fits\")\n sol_trace = input(\"Path to solar analog trace:\")\n if not sol_trace:\n reflec = False\n sol_path_trace = ''\n else:\n sol_path_trace = path+sol_trace\n\n print(\"=========================================================================\")\n print(\"Input label for these data (Leave blank for default, for no label type 'None').\")\n print(\"Default = {object} -- {analog} -- {obj date}\")\n label = input(\"Data label:\")\n\n ax, normalized_ast_spec, ast_wav = spectrum_plot(path+trace, ax, label, sol_path_trace)\n\n if reflec:\n ax.set_ylabel('Relative to Airmass 1.24 (Normalized at $5500 \\AA$)')\n # ax.set_ylabel('Reflectance Spectra (Normalized at $5500 \\AA$)')\n else:\n ax.set_ylabel('Relative Spectra (Normalized at $5500 \\AA$)')\n ax.set_xlabel('Wavelength ($\\AA$)')\n ax.legend()\n plt.savefig(outpath+'temp.png')\n print('New spectroscopy plot saved to {}'.format(outpath+'temp.png'))\n","sub_path":"spectra_comp/spec_comp.py","file_name":"spec_comp.py","file_ext":"py","file_size_in_byte":9020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"38844251","text":"class Solution(object):\n def minimumTotal(self, triangle):\n \"\"\"\n :type triangle: List[List[int]]\n :rtype: int\n \"\"\"\n for i in range(len(triangle)-2,-1,-1):\n for j in range(len(triangle[i])):\n triangle[i][j] = triangle[i][j] + min(triangle[i+1][j], triangle[i+1][j+1])\n return triangle[0][0]\n\np = Solution()\ntriangle = [\n [2],\n [3,4],\n [6,5,7],\n [4,1,8,3]\n]\nprint(p.minimumTotal(triangle))","sub_path":"120. 
Triangle /solution 3.py","file_name":"solution 3.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"605485366","text":"\n\"\"\"Сумма строк и столбцов двумерного массива\nЗадан целочисленный двумерный массив, состоящий из N строк и M столбцов.\nТребуется вычислить сумму элементов в каждой строке и в каждом столбце.\nПрограмма получает на вход два натуральных числа N и M – количество строк и столбцов двумерного массива.\nВ каждой из последующих N строк записаны M целых чисел – элементы массива.\nВсе числа во входных данных не превышают 1000 по абсолютной величине.\nВ первой строке вам необходимо вывести N чисел – суммы элементов массива для каждой строки в отдельности.\nВо второй строке в аналогичном формате выведите M чисел – суммы элементов для каждого столбца.\"\"\"\n\nN, M = map(int, input().split())\nmatrix_list = []\nstr_list = []\nver_list = []\n\nfor e in range(N):\n matrix_list.append([int(el) for el in input().split()])\n\nfor i in range(N):\n sum_str = 0\n for j in range(M):\n sum_str += matrix_list[i][j]\n str_list.append(sum_str)\nprint(*str_list)\n\nfor k in range(M):\n sum_ver = 0\n for l in range(N):\n sum_ver += matrix_list[l][k]\n ver_list.append(sum_ver)\nprint(*ver_list)","sub_path":"Courses/Инди-курс программирования на Python от egoroff_channel/5.6/5.6.7.py","file_name":"5.6.7.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"4490108","text":"from wisardpkg import KernelCanvas\nfrom random import random\n\nprint(\"\\n\\n\")\nprint(\"### Kernel Canvas ###\")\ndimension = 2\nnumberOfKernels = 10\nbitsByKernel = 4\nkc = KernelCanvas(dimension, numberOfKernels, bitsByKernel=bitsByKernel, useDirection=True)\n\nsequenceData = []\np = [10*random(),10*random()]\nfor i in range(100):\n point = list(p)\n point[0] += i\n point[1] += i\n sequenceData.append(point)\n\nout = kc.transform(sequenceData)\nprint(\"binary output:\",len(out), out)\nprint(\"### DONE Kernel Canvas ###\")\n","sub_path":"test/kernel_canvas_test.py","file_name":"kernel_canvas_test.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"543674738","text":"from django.conf.urls import url\nfrom comments import views\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\napp_name = 'comments'\n\nurlpatterns = [\n url(r'^post/(?P\\d+)/comment/$', views.add_comment_to_post, name='add_comment_to_post'),\n url(r'^todo/(?P\\d+)/suggestion/$', views.add_suggestion_to_todo, name='add_suggestion_to_todo'),\n url(r'^commentsapi/$', views.CommentListAPI.as_view()),\n url(r'^commentsapi/(?P\\d+)/$', views.CommentDetailSeri.as_view(), name='api_detail'),\n url(r'^commentsapi/(?P\\d+)/update/$', views.CommentUpdateSeri.as_view(), name='api_update'),\n url(r'^commentsapi/(?P\\d+)/delete/$', views.CommentDeleteSeri.as_view(), name='api_delete')\n\n]\n","sub_path":"comments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"517928067","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, msg, site\n\ndef process_page(index, page):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, 
pagetitle, txt))\n\n pagemsg(\"Processing\")\n\n parsed = blib.parse(page)\n\n found_headword_template = False\n for t in parsed.filter_templates():\n if str(t.name) in [\"ru-adj\"]:\n found_headword_template = True\n if not found_headword_template:\n notes = []\n for t in parsed.filter_templates():\n if str(t.name) in [\"ru-noun\", \"ru-noun+\", \"ru-proper noun\", \"ru-proper noun+\"]:\n notes.append(\"found noun header (%s)\" % str(t.name))\n if str(t.name) == \"head\":\n notes.append(\"found head header (%s)\" % getparam(t, \"2\"))\n pagemsg(\"Missing adj headword template%s\" % (notes and \"; \" + \",\".join(notes)))\n\nparser = blib.create_argparser(\"Find missing Russian adjective headwords\")\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nfor index, page in blib.references(\"Template:ru-decl-adj\", start, end):\n process_page(index, page)\n","sub_path":"find_ru_no_adj_headword.py","file_name":"find_ru_no_adj_headword.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"587775135","text":"import cv2\r\nimport pyws\r\nimport numpy as np\r\nimport win32ui\r\nimport win32con\r\nimport win32gui\r\nimport win32print\r\nfrom PIL import ImageGrab\r\nimport datetime\r\n\r\n\r\n# getid - EnumWindows用コールバック関数\r\ndef proc(hwnd, ar):\r\n title = win32gui.GetWindowText(hwnd)\r\n if ar[0] in title:\r\n ar[1].append(hwnd)\r\n return 1\r\n\r\n\r\n# titleをウィンドウタイトルに含むウィンドウのウィンドウハンドルを返します\r\n# title : 検索に使うタイトル\r\n# n : 何番目のウィンドウハンドルを返すか\r\ndef get_handle(title, n=0):\r\n hwnds = []\r\n win32gui.EnumWindows(proc, [title, hwnds])\r\n return hwnds[n]\r\n\r\n\r\ndef get_image(handle):\r\n rect = win32gui.GetWindowRect(handle)\r\n\r\n pos = 8, 59\r\n size = 727, 619\r\n rect = rect[0] + pos[0], rect[1] + pos[1], rect[0] + pos[0] + size[0], rect[1] + pos[1] + size[1]\r\n\r\n img = ImageGrab.grab(rect)\r\n img = np.asarray(img)\r\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\r\n\r\n # img = cv2.imread('img/reach_left.jpg')\r\n return img\r\n\r\n\r\ndef main():\r\n try:\r\n handle = get_handle('天鳳')\r\n except IndexError:\r\n return\r\n\r\n img = get_image(handle)\r\n name = 'img/' + datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\") + '.jpg'\r\n print(name)\r\n cv2.imwrite(name, img)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"GetWindow.py","file_name":"GetWindow.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"35543936","text":"from __future__ import print_function\nfrom collections import OrderedDict\nfrom collections import namedtuple\nimport os\n\n\ndef meminfo(): # MEMMORY USAGE\n ''' Return the information in /proc/meminfo\n as a dictionary '''\n meminfo=OrderedDict()\n\n with open('/proc/meminfo') as f:\n for line in f:\n meminfo[line.split(':')[0]] = line.split(':')[1].strip()\n return meminfo\n\nif __name__=='__main__':\n #print(meminfo())\n \n meminfo = meminfo()\n print('Total memory: {0}'.format(meminfo['MemTotal']))\n print('Free memory: {0}'.format(meminfo['MemFree']))\n\ndef process_list(): #PROCESS FUNCTION\n\n pids = []\n for subdir in os.listdir('/proc'):\n if subdir.isdigit():\n pids.append(subdir)\n\n return pids\nif __name__=='__main__':\n\n pids = process_list()\n print('Total number of running processes:: {0}'.format(len(pids)))\n\n\ndef netdevs(): ## Netwrok Usage\n ''' RX and TX bytes for each of the network devices '''\n\n 
with open('/proc/net/dev') as f:\n net_dump = f.readlines()\n \n device_data={}\n data = namedtuple('data',['rx','tx'])\n for line in net_dump[2:]:\n line = line.split(':')\n if line[0].strip() != 'lo':\n device_data[line[0].strip()] = data(float(line[1].split()[0])/(1024.0*1024.0), \n float(line[1].split()[8])/(1024.0*1024.0))\n \n return device_data\n\nif __name__=='__main__':\n \n netdevs = netdevs()\n for dev in netdevs.keys():\n print('{0}: {1} MiB {2} MiB'.format(dev, netdevs[dev].rx, netdevs[dev].tx))\n\n\n","sub_path":"psinfo.py","file_name":"psinfo.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"241009081","text":"import time\nfrom datetime import datetime, timedelta\nfrom urllib.error import URLError\nfrom urllib.parse import urlparse, urljoin\nfrom urllib.request import urlopen, Request\nimport schedule\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\nfrom pymongo.errors import PyMongoError\nfrom page import Page\nfrom settings import *\n\ndef crawl_page(url):\n try:\n response = urlopen(Request(url, headers={\"User-Agent\": USER_AGENT}))\n except URLError as error:\n print(URLError(error, url))\n response = None\n \n #Check if there was an error with our response\n if response is not None:\n soup = BeautifulSoup(response, \"lxml\")\n page = Page(url)\n\n #Grab all links\n for anchor in soup.find_all(\"a\"):\n href = anchor.get(\"href\")\n #Check if we have an absolute url or a relative one\n if bool(urlparse(href).netloc):\n link = href \n if link not in page.absolute_links:\n page.absolute_links.append(link) \n else:\n link = urljoin(url, href)\n if link not in page.relative_links: \n page.relative_links.append(link) \n \n #Grab all images\n for img in soup.find_all(\"img\"):\n image = img.get(\"src\") \n if image not in page.images:\n page.images.append(image)\n \n page.sort()\n else:\n page = None\n \n return page\n\ndef load_urls(file_name): \n urls = list()\n try: \n with open(file_name) as file:\n for line in file: \n line = line.lower().strip()\n #Filter out malformed urls\n if line.startswith(\"http://\") or line.startswith(\"https://\"):\n urls.append(line)\n except IOError as error:\n print(\"IOError: \", error)\n \n return urls\n\ndef check_expired_urls(urls, collection):\n expired_urls = list()\n for url in urls:\n try: \n found_urls = collection.find({\"url\": url}) \n except PyMongoError as error:\n print(\"MongoError:\", error) \n \n #Check if the url already exists in the collection\n if found_urls.count() == 0:\n expired_urls.append(url)\n else: \n #Check if the url is ready to be crawled again\n for current_url in found_urls:\n expiry_date = current_url[\"date\"] + timedelta(minutes=EXPIRY_PERIOD)\n current_date = datetime.utcnow()\n if current_date > expiry_date:\n expired_urls.append(url) \n return expired_urls\n \ndef main():\n #Set up database\n client = MongoClient(MONGO_URL)\n database = client.simple_crawler_db \n collection = database.site_data\n \n #Todo: Change from using a file to grabbing from db\n urls = load_urls(INPUT_FILE)\n expired_urls = check_expired_urls(urls, collection)\n \n #Crawl urls and store results\n pages = list()\n for url in expired_urls: \n page = crawl_page(url)\n if page is not None:\n pages.append(page)\n \n #Insert our results into db\n try:\n for page in pages: \n page_info = {\"url\": page.url,\n \"absolute_links\": page.absolute_links,\n \"relative_links\": page.relative_links,\n \"images\": page.images, 
\n \"date\": datetime.utcnow()} \n \n collection.find_one_and_update({\"url\": page.url}, {\"$set\" : page_info}, upsert=True) \n #print(\"Update or insert\", page.url)\n except PyMongoError as error:\n print(\"MongoError:\", error) \n \nif __name__ == \"__main__\":\n #Run once then set a schedule \n print(\"Crawler started\")\n main()\n schedule.every(SCHEDULE_PERIOD).minutes.do(main)\n \n while True:\n schedule.run_pending()\n time.sleep(SLEEP_PERIOD)\n ","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"427165112","text":"import pandas as pd\nimport sys\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\nimport torchvision\nfrom torchvision import transforms\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom model import vgg16\nimport random\nimport matplotlib.pyplot as plt\n\nuse_cuda = torch.cuda.is_available()\n\nclass MyDataset(Dataset):\n\n\tdef __init__(self, file_path, transform=None):\n\t\tself.data = pd.read_csv(file_path)\n\t\tself.label_list = np.array(self.data.iloc[:, 0])\n\t\ttemp = np.array(self.data.iloc[:, 1:])\n\t\timage_list = []\n\t\tfor i in range(0, temp.shape[0]):\n\t\t\ta = np.fromstring(temp[i, 0], dtype = np.float32, sep = ' ').reshape(48, 48, 1)\n\t\t\timage_list.append(a)\n\t\tself.image_list = np.array(image_list)\n\t\tself.transform = transform\n\n\tdef __len__(self):\n\t\treturn len(self.data)\n\n\tdef __getitem__(self, index):\n\t # load image as ndarray type (Height * Width * Channels)\n\t # be carefull for converting dtype to np.uint8 [Unsigned integer (0 to 255)]\n\t # in this example, i don't use ToTensor() method of torchvision.transforms\n\t # so you can convert numpy ndarray shape to tensor in PyTorch (H, W, C) --> (C, H, W)\n\t\t\n\t\timage = self.image_list[index]\n\t\tlabel = self.label_list[index]\n\t\tif self.transform is not None:\n\t\t image = self.transform(image)\n\t\treturn image, label\n\ntrain_dataset = MyDataset(sys.argv[1], transform = transforms.Compose([\n\ttransforms.ToPILImage(),\n\ttransforms.RandomHorizontalFlip(p = 0.3), \n transforms.RandomAffine(0, translate=(0.1, 0.1), scale=(0.8, 1), shear=15, resample=False, fillcolor=0),\n\ttransforms.ToTensor()]))\ndataset_size = len(train_dataset)\nindices = list(range(dataset_size))\nbatch_size = 64\nsp = int(np.floor(dataset_size*0.8))\ntrain_indices, val_indices = indices[:sp], indices[sp:]\ntrain_sampler = SubsetRandomSampler(train_indices)\nvalid_sampler = SubsetRandomSampler(val_indices)\ntrain_loader = DataLoader(train_dataset, batch_size = batch_size, sampler = train_sampler)\nvalid_loader = DataLoader(train_dataset, batch_size = batch_size, sampler = valid_sampler)\n\nnet = vgg16()\nif use_cuda:\n\tnet = net.cuda()\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(net.parameters(), lr = 1e-4)\nloss_list, acc_list = [], []\nloss_list1, acc_list1 = [], []\n\ndef train(epoch):\n\tnet.train()\n\tfor i, (images, labels) in enumerate(train_loader):\n\t\tif use_cuda:\n\t\t\timages, labels = images.cuda(), labels.cuda()\n\t\toptimizer.zero_grad()\n\n\t\toutput = net(images)\n\n\t\tloss = criterion(output, labels)\n\n\t\tif i % 10 == 0:\n\t\t print('Train - Epoch %d, Batch: %d, Loss: %f' % (epoch, i, loss.data.item()))\n\n\t\tloss.backward()\n\t\toptimizer.step()\ndef valid():\n\tnet.eval()\n\ttotal_correct = 0\n\tavg_loss = 0.0\n\tfor i, 
(images, labels) in enumerate(valid_loader):\n\t\tif use_cuda:\n\t\t\timages, labels = images.cuda(), labels.cuda()\n\t\toutput = net(images)\n\t\tavg_loss += criterion(output, labels).sum().item()\n\t\tpred = output.data.max(1)[1]\n\t\ttotal_correct += pred.eq(labels.data.view_as(pred)).sum().item()\n\n\tavg_loss /= (dataset_size - sp)\n\tacc = float(total_correct) / (dataset_size - sp)\n\tprint('Valid Avg. Loss: %f, Accuracy: %f' % (avg_loss, acc))\n\tacc_list.append(acc)\n\tloss_list.append(avg_loss)\n\ttotal_correct = 0\n\tavg_loss = 0.0\n\tfor i, (images, labels) in enumerate(valid_loader):\n\t\tif use_cuda:\n\t\t\timages, labels = images.cuda(), labels.cuda()\n\t\toutput = net(images)\n\t\tavg_loss += criterion(output, labels).sum().item()\n\t\tpred = output.data.max(1)[1]\n\t\ttotal_correct += pred.eq(labels.data.view_as(pred)).sum().item()\n\n\tavg_loss /= (dataset_size - sp)\n\tacc = float(total_correct) / (dataset_size - sp)\n\tprint('Valid Avg. Loss: %f, Accuracy: %f' % (avg_loss, acc))\n\tacc_list1.append(acc)\n\tloss_list1.append(avg_loss)\n'''\ndef plotData(plt, x_data, y_data, y1_data, y_label):\n\tx = [p for p in x_data]\n\ty = [q for q in y_data]\n\ty1 = [r for r in y1_data]\n\tplt.title('Learning Curve')\n\tplt.xlabel('Epoch')\n\tplt.ylabel(y_label)\n\tplt.plot(x, y, '-.', label = 'valid')\n\tplt.plot(x, y1, '-.', label = 'train')\n\tplt.savefig(y_label)\n\tplt.close('all')\n'''\ndef train_and_test(epoch):\n\ttrain(epoch)\n\tvalid()\n\t\n\nfor e in range(1, 500):\n\ttrain_and_test(e)\n'''\nepoch_list = list(range(1, 500))\nplotData(plt, epoch_list, acc_list, acc_list1, 'Training accuracy')\nplotData(plt, epoch_list, loss_list, loss_list1, 'Training loss')\n'''\ntorch.save(net.state_dict(), sys.argv[2])","sub_path":"hw3/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"411221443","text":"from itertools import permutations\r\nfrom collections import defaultdict\r\n\r\n\r\nclass Expression(object):\r\n\r\n def __init__(self, value, a, b, operator, func):\r\n self.a = a\r\n self.b = b\r\n self.value = value\r\n self.operator = operator\r\n self.func = func\r\n\r\n @property\r\n def a_val(self):\r\n if isinstance(self.a, Expression):\r\n return self.a.a_val\r\n return self.a\r\n\r\n @property\r\n def b_val(self):\r\n if isinstance(self.b, Expression):\r\n return self.b.b_val\r\n return self.b\r\n\r\n def as_equation(self):\r\n a = self.a.as_equation() if isinstance(self.a, Expression) else [self.a]\r\n b = self.b.as_equation() if isinstance(self.b, Expression) else [self.b]\r\n\r\n equation = '({} {} {})'.format(a, self.operator, b)\r\n return equation\r\n\r\n def numbers(self):\r\n \"\"\" :type: list[int]\"\"\"\r\n a_nums = self.a.numbers() if isinstance(self.a, Expression) else [self.a]\r\n b_nums = self.b.numbers() if isinstance(self.b, Expression) else [self.b]\r\n\r\n numbers = a_nums + b_nums\r\n return numbers\r\n\r\n def as_step(self):\r\n a = self.a_val if isinstance(self.a, int) else self.a.value\r\n b = self.b_val if isinstance(self.b, int) else self.b.value\r\n\r\n return '{} {} {} = {}'.format(a, self.operator, b, self.value)\r\n\r\n def steps(self):\r\n if isinstance(self.a, int) and isinstance(self.b, int):\r\n return [self.as_step()]\r\n\r\n a_step = [] if isinstance(self.a, int) else self.a.steps()\r\n b_step = [] if isinstance(self.b, int) else self.b.steps()\r\n\r\n return [self.as_step()] + a_step + b_step\r\n\r\n def 
ordered_steps(self):\r\n return reversed(self.steps())\r\n\r\n def __str__(self):\r\n return '''{} = {} | {}'''.format(self.as_equation(), self.value, sorted(self.numbers()))\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, Expression):\r\n return False\r\n\r\n if sorted(self.numbers()) != sorted(other.numbers()):\r\n return False\r\n\r\n if self.as_equation() == other.as_equation():\r\n return True\r\n\r\n if self.operator != other.operator:\r\n return False\r\n\r\n if other.operator in ['+', '*']:\r\n a_equal = self.a == other.a\r\n b_equal = self.b == other.b\r\n\r\n if a_equal or b_equal:\r\n return True\r\n\r\n return False\r\n\r\n\r\nclass Solver(object):\r\n\r\n def __init__(self):\r\n self._ops = {\r\n '+': lambda x, y: x + y,\r\n '-': lambda x, y: x - y,\r\n '*': lambda x, y: x * y,\r\n '/': lambda x, y: x // y if (x % y) == 0 else 0\r\n }\r\n\r\n self.last_answer = None\r\n\r\n def answers(self, numbers, target):\r\n \"\"\" :rtype: Expression \"\"\"\r\n n_numbers = len(numbers)\r\n\r\n # get all permutations for pairs of two numbers\r\n partials_map = defaultdict(list)\r\n\r\n # create the initial set of expressions for the given permutation\r\n for (a, b) in permutations(numbers, 2):\r\n for op, func in self._ops.items():\r\n value = func(a, b)\r\n if is_valid_value(value):\r\n expression = Expression(value, a, b, op, func)\r\n partials_map[value].append(expression)\r\n\r\n partials_map = filter_duplicates(partials_map)\r\n\r\n iteration = 0\r\n while iteration < n_numbers - 2:\r\n iteration += 1\r\n partials_map, target_found = process(numbers, self._ops, partials_map, target)\r\n\r\n if target_found:\r\n break\r\n\r\n\r\n # get the value closet to the target\r\n best_value = None\r\n deviation = 1E1000\r\n for value in sorted(partials_map.keys()):\r\n if value == target:\r\n best_value = target\r\n deviation = 0\r\n\r\n dev = abs(target - value)\r\n if (best_value is None) or (dev < deviation):\r\n best_value = value\r\n deviation = dev\r\n\r\n # get the best expression\r\n best_expression = None\r\n for expression in partials_map[best_value]:\r\n if (best_expression is None) or (len(expression.numbers()) < len(best_expression.numbers())):\r\n best_expression = expression\r\n\r\n self.last_answer = best_expression\r\n return best_expression\r\n\r\n\r\ndef process(numbers, operations, partials_map, target):\r\n new_partials_map = defaultdict(list) # because you cant add during iteration\r\n for _, expressions in partials_map.items():\r\n for expression in expressions:\r\n for num in numbers:\r\n if not is_subset(numbers, expression.numbers() + [num]):\r\n continue\r\n\r\n for op, func in operations.items():\r\n value = func(expression.value, num)\r\n if is_valid_value(value):\r\n new_expression = Expression(value, expression, num, op, func)\r\n new_partials_map[value].append(new_expression)\r\n\r\n if value == target:\r\n partials_map = merge_maps(partials_map, new_partials_map)\r\n partials_map = filter_duplicates(partials_map)\r\n return partials_map, True\r\n\r\n partials_map = merge_maps(partials_map, new_partials_map)\r\n partials_map = filter_duplicates(partials_map)\r\n\r\n return partials_map, False\r\n\r\n\r\ndef is_valid_value(value):\r\n if value <= 0:\r\n return False\r\n return True\r\n\r\n\r\ndef is_subset(available, selected):\r\n av_freq = _list_to_freq(available)\r\n sel_freq = _list_to_freq(selected)\r\n\r\n # ensure the selected has no more keys than available\r\n if len(sel_freq.keys()) > 
len(av_freq.keys()):\r\n return False\r\n\r\n # check key values to ensure that selected is not higher for any given key\r\n for value, freq in av_freq.items():\r\n if sel_freq[value] > freq:\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef _list_to_freq(arr):\r\n freq = defaultdict(int)\r\n\r\n for v in arr:\r\n freq[v] += 1\r\n\r\n return freq\r\n\r\n\r\ndef merge_maps(map1, map2):\r\n merged = defaultdict(list)\r\n\r\n for key, expressions in map1.items():\r\n merged[key].extend(expressions)\r\n\r\n for key, expressions in map2.items():\r\n merged[key].extend(expressions)\r\n\r\n return merged\r\n\r\n\r\ndef _print_map(value_map):\r\n for value, expressions in value_map.items():\r\n print('Answers for: {}'.format(value))\r\n for exp in expressions:\r\n print('\\t{}'.format(exp))\r\n\r\n\r\ndef filter_duplicates(expression_map):\r\n # this might need to become a prefix notation check\r\n\r\n new_partial_map = defaultdict(list)\r\n for value, expressions in expression_map.items():\r\n filtered_expression = []\r\n for e in expressions:\r\n if e not in filtered_expression:\r\n filtered_expression.append(e)\r\n new_partial_map[value] = filtered_expression\r\n\r\n return new_partial_map\r\n\r\n\r\n","sub_path":"countdown/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"64590550","text":"df = pd.read_csv('EUR_USD.csv')\n\ndf['Date'] = df['Date'].astype('str')\ndf['Timestamp'] = df['Timestamp'].astype('str')\n\nind = df['Date']+' ' + df['Timestamp']\n\nind.name='Date'\n\ndf2 = df.set_index(ind)[['Open','High','Low','Close','Volume']]\n\n\ndf2.index= pd.DatetimeIndex(df2.index)\n\ndf2.to_csv('EURUSD_cleaned.csv')\n","sub_path":"OnePy/old/clean_fx.py","file_name":"clean_fx.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"119382840","text":"import tensorflow as tf\nimport numpy as np\nimport retro\n\nfrom skimage import transform\nfrom skimage.color import rgb2gray\n\nimport matplotlib.pyplot as plt\n\nfrom collections import deque\n\nimport random\n\nimport warnings\n\nimport memory\nimport dqnetwork\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nhandler = logging.FileHandler('spaceinvaders.log')\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nwarnings.filterwarnings('ignore')\n\nenv = retro.make(game=\"SpaceInvaders-Atari2600\")\n\nlogger.info(\"The size of our frame is: {}\".format(env.observation_space))\nlogger.info(\"The action size is: {}\".format(env.action_space.n))\n\n### model hyperparameters\nstate_size = [110, 84, 4]\naction_size = env.action_space.n\nlearning_rate = 0.00025\n\n### training parameters\ntotal_episodes = 50\nmax_steps = 50000\nbatch_size = 64\n\n### exploration parameters\nexplore_start = 1.0\nexplore_stop = 0.01\ndecay_rate = 0.00001\n\n### Q learning parameters\ngamma = 0.9\n\n### memory hyperparameters\npretrain_length = batch_size\nmemory_size = 30000\n\n### preprocessing parameters\nstack_size = 4\n\n### training\ntraining = True\n\n### render\nepisode_render = False\n\n### play agent\nagent_test = False\n\n\npossible_actions = np.array(np.identity(env.action_space.n, dtype=int).tolist())\nstacked_frames = deque([np.zeros((110, 84), dtype=np.int) for 
i in range(stack_size)], maxlen=4)\n\ndef preprocess_frame(frame):\n gray = rgb2gray(frame)\n\n cropped_frame = gray[8:-12, 4:-12]\n normalized_frame = cropped_frame/255.0\n preprocessed_frame = transform.resize(normalized_frame, [110, 84])\n return preprocessed_frame\n\ndef stack_frames(stacked_frames, state, is_new_episode):\n frame = preprocess_frame(state)\n\n if is_new_episode:\n stacked_frames = deque([np.zeros((110, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)\n stacked_frames.append(frame)\n stacked_frames.append(frame)\n stacked_frames.append(frame)\n stacked_frames.append(frame)\n \n stacked_state = np.stack(stacked_frames, axis=2)\n\n else:\n stacked_frames.append(frame)\n stacked_state = np.stack(stacked_frames, axis=2)\n\n return stacked_state, stacked_frames\n\ntf.reset_default_graph()\nDQNetwork = dqnetwork.DQNetwork(state_size, action_size, learning_rate)\ndqn_memory = memory.Memory(max_size = memory_size)\n\nfor i in range(pretrain_length):\n if i == 0:\n state = env.reset()\n state, stacked_frames = stack_frames(stacked_frames, state, True)\n\n choice = random.randint(1, len(possible_actions))-1\n action = possible_actions[choice]\n next_state, reward, done, _ = env.step(action)\n\n #env.render()\n\n next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)\n\n if done:\n next_state = np.zeros(state.shape)\n dqn_memory.add((state, action, reward, next_state, done))\n state = env.reset()\n state, stacked_frames = stack_frames(stacked_frames, state, True)\n else:\n dqn_memory.add((state, action, reward, next_state, done))\n state = next_state\n\nwriter = tf.summary.FileWriter(\"/tmp/tb/dqn/1\")\ntf.summary.scalar(\"Loss\", DQNetwork.loss)\nwrite_op = tf.summary.merge_all()\n\ndef predict_action(explore_start, explore_stop, decay_rate, decay_step, state, actions):\n exp_exp_tradeoff = np.random.rand()\n\n explore_probability = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)\n if (explore_probability > exp_exp_tradeoff):\n choice = random.randint(1, len(possible_actions)) - 1\n action =possible_actions[choice]\n else:\n Qs = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs_: state.reshape((1, *state.shape))})\n choice = np.argmax(Qs)\n action = possible_actions[choice]\n\n return action, explore_probability\n\nsaver = tf.train.Saver()\n\nif training == True:\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n decay_step = 0\n rewards_list = []\n for episode in range(total_episodes):\n step = 0\n episode_rewards = []\n state = env.reset()\n state, stacked_frames = stack_frames(stacked_frames, state, True)\n\n while step < max_steps:\n step += 1\n decay_step += 1\n action, explore_probability = predict_action(explore_start, explore_stop, decay_rate,\n decay_step, state, possible_actions)\n next_state, reward, done, _ = env.step(action)\n\n if episode_render:\n env.render()\n\n episode_rewards.append(reward)\n\n if done:\n next_state = np.zeros((110, 84), dtype=np.int)\n next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)\n step = max_steps\n total_reward = np.sum(episode_rewards)\n rewards_list.append((episode, total_reward))\n dqn_memory.add((state, action, reward, next_state, done))\n else:\n next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)\n dqn_memory.add((state, action, reward, next_state, done))\n state = next_state \n ### LEARNING PART\n batch = dqn_memory.sample(batch_size)\n states_mb = np.array([each[0] for each in 
batch], ndmin=3)\n actions_mb = np.array([each[1] for each in batch])\n rewards_mb = np.array([each[2] for each in batch])\n next_states_mb = np.array([each[3] for each in batch], ndmin=3)\n dones_mb = np.array([each[4] for each in batch])\n\n target_Qs_batch = []\n\n Qs_next_state = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs_: next_states_mb})\n for i in range(0, len(batch)):\n terminal = dones_mb[i]\n if terminal:\n target_Qs_batch.append(rewards_mb[i])\n else:\n target = rewards_mb[i] + gamma*np.max(Qs_next_state[i])\n target_Qs_batch.append(target)\n\n targets_mb = np.array([each for each in target_Qs_batch])\n loss, _ = sess.run([DQNetwork.loss, DQNetwork.optimizer],\n feed_dict={DQNetwork.inputs_: states_mb,\n DQNetwork.targetQ: targets_mb,\n DQNetwork.actions_: actions_mb})\n summary = sess.run(write_op, feed_dict={DQNetwork.inputs_: states_mb,\n DQNetwork.targetQ: targets_mb,\n DQNetwork.actions_: actions_mb})\n \n logger.info(\"Episode: {},Total reward: {},Explore P: {:.4f},Training Loss: {:.4f}\".format(episode, total_reward, explore_probability, loss))\n writer.add_summary(summary, episode)\n writer.flush()\n\n if episode % 5 == 0:\n save_path = saver.save(sess, \".models/model.ckpt\")\n logger.info(\"Model saved.\")\n\n\nif agent_test:\n total_test_rewards = []\n saver.restore(sess, \".models/model.ckpt\")\n for episode in range(1):\n total_reward = 0\n\n state = env.reset()\n state, stacked_frames = stack_frames(stacked_frames, state, True)\n\n logger.info(\"*******************************************************\")\n logger.info(\"EPISODE \", episode)\n\n while True:\n state = state.reshape((1, *state_size))\n\n Qs = sess.run(DQNetwork.output, feed_dict= {DQNetwork.inputs_: state})\n\n choice = np.argmax(Qs)\n action = possible_actions[choice]\n\n next_state, reward, done, _ = env.step(action)\n env.render()\n\n total_reward += reward\n if done:\n logger.info(\"Score: {}\". 
format(total_reward))\n total_test_rewards.append(total_reward)\n break\n\n state, stacked_frames = stack_frames(stacked_frames, state, False)\n state = next_state\n env.close()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"140314926","text":"\"\"\"\nExtract the 'repo = ' line from a credentials file.\nFile must have fixed name credentials.py, in this directory\n\"\"\"\nimport configparser\ntry:\n config = configparser.ConfigParser()\n config.read('scripts/credentials.conf')\n print(config['DEFAULT']['repo'])\nexcept Exception as err: \n print(\"***Unable to extract repo line***\")\n sys.exit(1) # Error code for shell\n\n \n","sub_path":"grading/gradingApplication/scripts/extract_repo.py","file_name":"extract_repo.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"654122602","text":"import cv2\r\nimport tkinter as tk\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk\r\nfrom tkinter import messagebox\r\n\r\nwhite = \"#ffffff\"\r\nlightBlue2 = \"#adc5ed\"\r\nfont = \"Constantia\"\r\nfontButtons = (font, 12)\r\nmaxWidth = 800\r\nmaxHeight = 640\r\n\r\n# Grafik oyna\r\nmainWindow = tk.Tk()\r\nmainWindow.configure(bg=lightBlue2)\r\nmainWindow.geometry('%dx%d+%d+%d' % (maxWidth, maxHeight, 0, 0))\r\nmainWindow.resizable(0, 0)\r\n\r\nmainFrame = Frame(mainWindow)\r\nmainFrame.place(x=70, y=70)\r\n\r\n# Video tasvirlarni joylash\r\nlmain = tk.Label(mainFrame)\r\nlmain.grid(row=0, column=0)\r\n\r\n\r\ndef Tugadi():\r\n answer = tk.messagebox.askquestion(\"Are you sure ?\", \"Dastur tugatilsinmi ?\")\r\n if answer == 'yes':\r\n mainWindow.destroy()\r\n\r\n\r\nstartButton = Button(mainWindow, text=\"START\", font=fontButtons, bg=white, width=15, height=1)\r\nstartButton.place(x=200, y=570)\r\nstartButton.configure(command=lambda: show_frame())\r\n\r\ncloseButton = Button(mainWindow, text=\"QUIT\", font=fontButtons, bg=white, width=15, height=1)\r\ncloseButton.configure(command=lambda: Tugadi())\r\ncloseButton.place(x=450, y=570)\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\n\r\ndef show_frame():\r\n ret, frame = cap.read()\r\n\r\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n\r\n img = Image.fromarray(cv2image)\r\n imgtk = ImageTk.PhotoImage(image=img)\r\n lmain.configure(image=imgtk)\r\n lmain.imgtk = imgtk\r\n\r\n mainWindow.after(10, show_frame)\r\n\r\n\r\nmainWindow.mainloop() # GUI boshlanadi\r\n","sub_path":"Python and Opencv projects/Python+Opencv+Tkinter.py","file_name":"Python+Opencv+Tkinter.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"150431268","text":"'''\nCopyright 2019 Broadcom. 
The term \"Broadcom\" refers to Broadcom Inc.\nand/or its subsidiaries.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nimport sys\nimport os\nimport pytest\n\nfrom ztp.ZTPLib import runCommand, getCfg\nfrom .testlib import createPySymlink\nsys.path.append(getCfg('plugins-dir'))\n\ncreatePySymlink(getCfg('plugins-dir')+'/connectivity-check')\nfrom connectivity_check import ConnectivityCheck\n\nclass TestClass(object):\n\n '''!\n This class allow to define unit tests for class ConnectivityCheck\n '''\n\n def test_data_hardening_test1(self, tmpdir):\n '''!\n Test case when we call the plugin with incomplete or wrong data\n '''\n d = tmpdir.mkdir(\"valid\")\n fh = d.join(\"input.json\")\n fh.write(\"\"\"\n {\n \"Foo\": \"empty\"\n }\n \"\"\")\n connectivity_check = ConnectivityCheck(str(fh))\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n connectivity_check.main()\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 1\n\n def test_data_hardening_test2(self, tmpdir):\n '''!\n Test case when we call the plugin with incomplete or wrong data\n '''\n d = tmpdir.mkdir(\"valid\")\n fh = d.join(\"input.json\")\n fh.write(\"\"\"\n {\n \"ztp\": { }\n }\n \"\"\")\n connectivity_check = ConnectivityCheck(str(fh))\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n connectivity_check.main()\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 1\n\n def test_ping_localhost(self, tmpdir):\n '''!\n Test case pinging IPV4 localhost:\n Verify that pinging IPV4 localhost succeeds\n '''\n d = tmpdir.mkdir(\"valid\")\n fh = d.join(\"input.json\")\n fh.write(\"\"\"\n {\n \"connectivity-check\": {\n \"ping-hosts\": \"127.0.0.1\",\n \"deadline\": 15\n }\n }\n \"\"\")\n connectivity_check = ConnectivityCheck(str(fh))\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n connectivity_check.main()\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 0\n\n def test_ping_non_routable_address(self, tmpdir):\n '''!\n Test case pinging non routable IPV4 address:\n Verify that pinging IPV4 non routable address fails\n '''\n d = tmpdir.mkdir(\"valid\")\n fh = d.join(\"input.json\")\n fh.write(\"\"\"\n {\n \"01-connectivity-check\": {\n \"retry-count\": 2,\n \"retry-interval\": 15,\n \"timeout\": \"10\",\n \"ping-hosts\": [\"192.0.2.1\", 123]\n }\n }\n \"\"\")\n connectivity_check = ConnectivityCheck(str(fh))\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n connectivity_check.main()\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 1\n\n def test_ping_ipv6_localhost(self, tmpdir):\n '''!\n Test case pinging IPV6 localhost\n Verify that pinging IPV6 localhost succeeds\n '''\n d = tmpdir.mkdir(\"valid\")\n fh = d.join(\"input.json\")\n fh.write(\"\"\"\n {\n \"connectivity-check\": {\n \"ping6-hosts\": [\"0:0:0:0:0:0:0:1\"],\n \"retry-count\": -2,\n \"retry-interval\": -15\n }\n }\n \"\"\")\n connectivity_check = ConnectivityCheck(str(fh))\n with pytest.raises(SystemExit) as 
pytest_wrapped_e:\n connectivity_check.main()\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 0\n\n def test_ping_ipv6_non_routable_address(self, tmpdir):\n '''!\n Test case pinging non routable IPV6 address:\n Verify that pinging IPV6 non routable address fails\n '''\n d = tmpdir.mkdir(\"valid\")\n fh = d.join(\"input.json\")\n fh.write(\"\"\"\n {\n \"connectivity-check\": {\n \"ping6-hosts\": [\"0:0:0:0:0:0:0:1\", \"fe:80:0:0:0:0:0:1\"],\n \"retry-count\": 2\n }\n }\n \"\"\")\n connectivity_check = ConnectivityCheck(str(fh))\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n connectivity_check.main()\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 1\n","sub_path":"tests/test_connectivity-check.py","file_name":"test_connectivity-check.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"484462323","text":"import PyPDF2, os\r\n\r\n\r\ndef mergeDefault():\r\n\r\n\tfiles = []\r\n\tfor filename in os.listdir('pdf'):\r\n\t\tif filename.endswith('pdf'):\r\n\r\n\t\t\tfiles.append(filename)\r\n\tos.chdir('pdf')\r\n\twriter = PyPDF2.PdfFileWriter()\r\n\tfiles.sort(key = str.lower)\r\n\t#os.getcwd()\r\n\r\n\r\n\r\n\tfor filename in files:\r\n\r\n\t\tfileObj = open (filename,'rb') \r\n\t\treader = PyPDF2.PdfFileReader(fileObj)\r\n\t\tfor pageNum in range(0,reader.numPages):\r\n\t\t\tpageObj = reader.getPage(pageNum)\r\n\t\t\twriter.addPage(pageObj)\r\n\t\t\r\n\r\n\tresultPdf = open('FinalPDF.pdf','wb')\r\n\twriter.write(resultPdf)\r\n\tfileObj.close()\r\n\tresultPdf.close()\r\n\r\ndef decide():\r\n\tglobal var\r\n\tnum = var.get()\r\n\tif num == 1:\r\n\t\tmergeDefault()\r\nfrom Tkinter import *\r\ntop = Tk()\r\ntop.title('Merge PDFs')\r\n\r\nstring = StringVar()\r\nlabel = Label(top, textvariable=string )\r\nstring.set(\"All PDFs in the set directory will be merged according to alphabetical order of file name.\\n To Merge check the box below and start merge.\")\r\nlabel.pack()\r\n\r\nvar = IntVar()\r\nC = Checkbutton(top, text = \"Merge all PDFs directly\", variable = var)\r\nC.pack()\r\n\r\nb = Button(top,text='Start Merging',command=decide)\r\nb.pack()\r\n\r\na= Button(top, text=\"Close\", command=quit)\r\na.pack()\r\n\r\ntop.mainloop()","sub_path":"merge_pdf_files.py","file_name":"merge_pdf_files.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"593621615","text":"#!/usr/bin/env python\n\nimport argparse\nimport sys\n\ntry:\n from typing import Dict\nexcept ImportError:\n pass\n\nreference_frequency = {\n \"E\": 12.02,\n \"T\": 9.10,\n \"A\": 8.12,\n \"O\": 7.68,\n \"I\": 7.31,\n \"N\": 6.95,\n \"S\": 6.28,\n \"R\": 6.02,\n \"H\": 5.92,\n \"D\": 4.32,\n \"L\": 3.98,\n \"U\": 2.88,\n \"C\": 2.71,\n \"M\": 2.61,\n \"F\": 2.30,\n \"Y\": 2.11,\n \"W\": 2.09,\n \"G\": 2.03,\n \"P\": 1.82,\n \"B\": 1.49,\n \"V\": 1.11,\n \"K\": 0.69,\n \"X\": 0.17,\n \"Q\": 0.11,\n \"J\": 0.10,\n \"Z\": 0.07,\n}\n\n\ndef parse_arguments():\n # type: () -> argparse.Namespace\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"FILE\",\n help=\"The input file with the ciphertext (use '-' for standard input)\",\n default=\"-\")\n return parser.parse_args()\n\n\ndef read_ciphertext(options):\n # type: (argparse.Namespace) -> str\n\n if options.FILE == \"-\":\n with sys.stdin as fd:\n text = fd.readlines()\n else:\n with open(options.FILE) 
as fd:\n text = fd.readlines()\n ciphertext = \"\"\n for line in text:\n ciphertext += line.lower().rstrip()\n return ciphertext\n\n\ndef count_letters(text):\n # type: (str) -> Dict[str, Dict[ str, float]]\n \"\"\"Count the letter frequencies in 'text'\n\n Count the letter frequency in 'text'. The output is a dictionary\n {'LETTER': { \"count\": COUNT, \"frequency\": FREQUENCY(in %) }.\n\n \"\"\"\n\n result = {} # type: Dict[str, Dict[str, float]]\n total = 0 # type: int\n for letter in text:\n letter = letter.strip()\n if len(letter) == 0:\n continue\n total += 1\n if letter in result:\n result[letter][\"count\"] += 1\n else:\n result[letter] = {\"count\": 1}\n for letter in result:\n result[letter][\"frequency\"] = result[letter][\"count\"] / total * 100\n return result\n\n\ndef main(options):\n # type: (argparse.Namespace) -> None\n\n ciphertext = read_ciphertext(options)\n letter_frequencies = count_letters(ciphertext)\n letters = sorted(letter_frequencies,\n key=lambda l: letter_frequencies[l][\"count\"],\n reverse=True)\n reference = sorted(reference_frequency,\n key=reference_frequency.get, reverse=True)\n print(\"ciphertext | reference\")\n print(\"--------------------------\")\n for l in letters:\n reference_string = \"-\"\n if len(reference) > 0:\n reference_string = \"%6.3f (%s)\" % (\n reference_frequency[reference[0]], reference[0].lower())\n del reference[0]\n print(\"%s %4d %6.3f | %s\" % (\n l, letter_frequencies[l][\"count\"],\n letter_frequencies[l][\"frequency\"], reference_string))\n\n\nif __name__ == \"__main__\":\n main(parse_arguments())\n","sub_path":"scripts/frequency_analysis.py","file_name":"frequency_analysis.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"274960002","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'\n\ndb = SQLAlchemy(app)\n\n\nclass Todo(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n text = db.Column(db.String(200))\n complete = db.Column(db.Boolean)\n\n\n@app.route('/')\ndef home():\n incomplete = Todo.query.filter_by(complete=False).all()\n complete = Todo.query.filter_by(complete=True).all()\n\n return render_template('home.html', incomplete=incomplete, complete=complete)\n\n\n@app.route('/add', methods=['POST'])\ndef add():\n todotext = request.form['todotext']\n todo = Todo(text=todotext, complete=False)\n db.session.add(todo)\n db.session.commit()\n\n return redirect(url_for('home'))\n\n\n@app.route('/complete/')\ndef complete(id):\n todo = Todo.query.filter_by(id=int(id)).first()\n todo.complete = True\n db.session.commit()\n\n return redirect(url_for('home'))\n\n@app.route('/delete/')\ndef delete(id):\n todo = Todo.query.filter_by(id=int(id)).first()\n db.session.delete(todo)\n db.session.commit()\n\n return redirect(url_for('home'))\n\n@app.route('/update/', methods=['GET', 'POST'])\ndef update(id):\n todo = Todo.query.filter_by(id=int(id)).first()\n if request.method == 'POST':\n todo.text = request.form['todotext']\n db.session.commit()\n return redirect(url_for('home'))\n else:\n return render_template('update.html', todo=todo)\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"Project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"444850960","text":"# coding: utf-8\n#!/usr/bin/env python3\n\nimport redis\nimport configparser\nfrom flask_googlemaps import GoogleMaps\nfrom flask_googlemaps import Map, icons\nfrom flask import Flask, render_template, Response\n\n\napp = Flask(__name__, template_folder=\"templates\")\n\n# you can set key as config\napp.config['GOOGLEMAPS_KEY'] = \"\"\nGoogleMaps(app, key=\"\")\n\nconfig = configparser.ConfigParser()\nconfig.read('TapAndMap.conf')\n\n\n@app.route(\"/\")\ndef mapview():\n r = redis.StrictRedis(host='localhost', port=6379, db=0)\n markersL = [{'icon': icons.alpha.A,\n 'lat': config['all']['HomeLat'],\n 'lng': config['all']['HomeLong'],\n 'infobox': \"This is your TapAndMap server. IP: \" +\n config['all']['TapAndMapIP']}]\n geolines = []\n for key in r.scan_iter(\"*\"):\n if key.split(b':')[1] == b'1': # ICMP\n icon = icons.dots.blue\n line = '#0000FF'\n elif key.split(b':')[1] == b'6': # TCP\n icon = icons.dots.yellow\n line = '#FFFF00'\n elif key.split(b':')[1] == b'17': # UDP\n icon = icons.dots.green\n line = '#00FF00'\n try:\n markersL.append({'icon': icon,\n 'lat': r.get(key).split(b'x')[0].decode(\"UTF-8\"),\n 'lng': r.get(key).split(b'x')[1].decode(\"UTF-8\"),\n 'infobox': 'IP:' +\n key.split(b':')[0].decode(\"UTF-8\"),\n })\n\n pathList = [{'lat': float(config['all']['HomeLat']),\n 'lng': float(config['all']['HomeLong'])},\n {'lat': float(\n r.get(key).split(b'x')[0].decode(\"UTF-8\")),\n 'lng': float(\n r.get(key).split(b'x')[1].decode(\"UTF-8\"))}]\n\n geolines.append({'stroke_color': line,\n 'stroke_opacity': 1.0,\n 'stroke_weight': 3,\n 'geodesic': True,\n 'path': pathList\n }\n )\n except IndexError:\n pass\n\n tap_and_map = Map(\n identifier=\"tapandmap\",\n varname=\"tapandmap\",\n lat=config['all']['HomeLat'],\n lng=config['all']['HomeLong'],\n style=\"height:100vh;width:70vw;margin:0;float:left;\",\n zoom=config['all']['ZoomLevel'],\n fit_markers_to_bounds=True,\n polylines=geolines,\n markers=markersL\n )\n\n return render_template(\n 'index.html',\n tap_and_map=tap_and_map,\n )\n\n\n@app.errorhandler(404)\ndef not_found(exc):\n return Response('
There is only one page, and this is not it'), 404\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, use_reloader=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"475245821","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n#\n# This file is base on EditableGrid example.\n# http://editablegrid.net\n#\n# Copyright 2018 by ceprio\n# This file is part of editablegrid-python-sqlite-example which is released under the MIT License.\n# See file LICENCE_1 or go to https://github.com/ceprio/editablegrid-python-sqlite-example for \n# full license details.\n\n# This script loads data from the database and returns it to the js\n\nimport config\nimport sqlite3\nfrom bs4 import BeautifulSoup\n\ndef main(_POST):\n try:\n ret = \"error\"\n con = sqlite3.connect(config['db_name'])\n\n # Get all parameter provided by the javascript\n name = BeautifulSoup(_POST['name'], \"lxml\").get_text()\n firstname = BeautifulSoup(_POST['firstname'], \"lxml\").get_text()\n tablename = BeautifulSoup(_POST['tablename'], \"lxml\").get_text()\n\n cur = con.execute(\"INSERT INTO \" + tablename + \" (name, firstname) VALUES ( ?, ?)\", (name, firstname))\n data = cur.fetchall()\n if not data:\n con.commit()\n ret = \"ok\"\n\n# except sqlite3.Error as e:\n# self.log.error(\"Database error: %s\" % e)\n# except Exception as e:\n# self.log.error(\"Exception in _query: %s\" % e)\n finally:\n if con:\n con.close()\n return ret\n\nif __name__ == \"__main__\":\n print(main({'name' : 'Pronovsot', 'firstname' : 'Christian', 'tablename': 'demo'}))\n\n\n","sub_path":"add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"346165919","text":"# -*- coding: utf-8 -*-\n# vi:si:et:sw=4:sts=4:ts=4\n# @Time : 2021/4/8 9:01 PM\n# @Author : zhangsong\n\nimport os\nimport random\nimport tensorflow as tf\nimport boto3\n\n# example:https://tensorflow.google.cn/tutorials/load_data/images?hl=zh_cn\n# init s3 env for tensorflow s3 driver\nos.environ['AWS_ACCESS_KEY_ID'] = \"empty\"\nos.environ['AWS_SECRET_ACCESS_KEY'] = \"empty\"\nos.environ['S3_ENDPOINT'] = \"localhost:8333\"\nos.environ['S3_USE_HTTPS'] = \"0\"\nos.environ['S3_VERIFY_SSL'] = \"0\"\n\n# init s3 info for boto3 driver\naws_access_key_id = \"empty\"\naws_secret_access_key = \"empty\"\naws_endpoint_url = \"http://localhost:8333\"\n\nbucket_name = \"tensorflowbucket\"\nprefix = \"flower_photos/\"\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\n\ns3_client = boto3.client(\"s3\", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key,\n endpoint_url=aws_endpoint_url)\n\n# response structure of list_objects_v2():\n# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_objects_v2\nall_image_paths = list(\"s3://{}/{}\".format(bucket_name, obj['Key']) for obj in\n s3_client.list_objects_v2(Bucket=bucket_name, Prefix=prefix)['Contents'])\nrandom.shuffle(all_image_paths)\nlabel_names = sorted(prefix['Prefix'].rstrip('/').split('/')[-1] for prefix in\n s3_client.list_objects_v2(Bucket=bucket_name, Delimiter='/', Prefix=prefix)['CommonPrefixes'])\nlabel_to_index = dict((name, index) for index, name in enumerate(label_names))\nall_image_labels = [label_to_index[path.split(\"/\")[-2]] for path in all_image_paths]\n\n\ndef preprocess_image(image):\n image = 
tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [192, 192])\n image /= 255.0 # normalize to [0,1] range\n\n return image\n\n\ndef load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)\n\n\npath_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)\nimage_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)\nlabel_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))\nimage_label_ds = tf.data.Dataset.zip((image_ds, label_ds))\n\nBATCH_SIZE = 32\n\nds = image_label_ds.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=len(all_image_paths)))\nds = ds.batch(BATCH_SIZE)\nds = ds.prefetch(buffer_size=AUTOTUNE)\n\nmobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)\nmobile_net.trainable = False\n\n\ndef change_range(image, label):\n return 2 * image - 1, label\n\n\nkeras_ds = ds.map(change_range)\n\nmodel = tf.keras.Sequential([\n mobile_net,\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Dense(len(label_names), activation='softmax')])\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(),\n loss='sparse_categorical_crossentropy',\n metrics=[\"accuracy\"])\n\nsteps_per_epoch = tf.math.ceil(len(all_image_paths) / BATCH_SIZE).numpy()\n\nmodel.fit(ds, epochs=1, steps_per_epoch=3)\n","sub_path":"tensorflow_on_s3.py","file_name":"tensorflow_on_s3.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"13525384","text":"#!/usr/bin/env python\n\nimport usb.core\nimport usb.util\nimport time\nimport RPi.GPIO as GPIO\n\n#HID device used\nVENDOR_ID = 0x05a4 #replace with your HID vendor ID\nPRODUCT_ID = 0x0102 #replace with your HID product ID\n\n#HID keys initialise\nKEY_MINUS = 86 # will set pin HIGH\nKEY_PLUS = 87 # will set pin LOW\nKEY_ENTER = 88 # unused TBA\nKEY_NUM_1 = 89 # will control GPIO 4\nKEY_NUM_2 = 90 # will control GPIO 17\nKEY_NUM_3 = 91 # will control GPIO 27\nKEY_NUM_4 = 92 # will control GPIO 22\nKEY_NUM_5 = 93 # will control GPIO 23\nKEY_NUM_6 = 94 # will control GPIO 24\nKEY_NUM_7 = 95 # will control GPIO 25\nKEY_NUM_8 = 96 # will control GPIO 18\nKEY_NUM_9 = 97 # unused TBA\nKEY_NUM_0 = 98 # will reset all GPIO\nKEY_DEL_0 = 99 # will break out of program\n\n#GPIO initialise\nGPIO.setmode(GPIO.BCM) # using BCM scheme\nGPIO.setup(4, GPIO.OUT) # using GPIO 4 as OUT\nGPIO.setup(17, GPIO.OUT) # using GPIO 17 as OUT\nGPIO.setup(27, GPIO.OUT) # using GPIO 27 as OUT\nGPIO.setup(22, GPIO.OUT) # using GPIO 22 as OUT\nGPIO.setup(23, GPIO.OUT) # using GPIO 23 as OUT\nGPIO.setup(24, GPIO.OUT) # using GPIO 24 as OUT\nGPIO.setup(25, GPIO.OUT) # using GPIO 25 as OUT\nGPIO.setup(18, GPIO.OUT) # using GPIO 18 as OUT\npinlist = [4,17,27,22,23,24,25,18]\n\n#USB initialise\nUSB_IF = 0\nUSB_TIMEOUT = 5\n\ndev = usb.core.find(idVendor=VENDOR_ID, idProduct=PRODUCT_ID)\nendpoint = dev[0][(0,0)][0]\n\nif dev.is_kernel_driver_active(USB_IF) is True:\n\tdev.detach_kernel_driver(USB_IF)\nusb.util.claim_interface(dev, USB_IF)\n\n#listening for key events\nwhile True:\n\tcontrol = None\n\ttry:\n\t\tcontrol = dev.read(endpoint.bEndpointAddress, endpoint.wMaxPacketSize, USB_TIMEOUT)\n\t\tif KEY_NUM_1 in control and KEY_PLUS in control:\n\t\t\tGPIO.output(4,1)\n\t\telif KEY_NUM_1 in control and KEY_MINUS in control:\n\t\t\tGPIO.output(4,0)\n\t\telif KEY_NUM_2 in control and KEY_PLUS in control:\n\t\t\tGPIO.output(17,1)\n\t\telif KEY_NUM_2 in control 
and KEY_MINUS in control:\n\t\t\tGPIO.output(17,0)\n\t\telif KEY_NUM_3 in control and KEY_PLUS in control:\n\t\t\tGPIO.output(27,1)\n\t\telif KEY_NUM_3 in control and KEY_MINUS in control:\n\t\t\tGPIO.output(27,0)\n\t\telif KEY_NUM_4 in control and KEY_PLUS in control:\n\t\t\tGPIO.output(22,1)\n\t\telif KEY_NUM_4 in control and KEY_MINUS in control:\n\t\t\tGPIO.output(22,0)\n\t\telif KEY_NUM_5 in control and KEY_PLUS in control:\n\t\t\tGPIO.output(23,1)\n\t\telif KEY_NUM_5 in control and KEY_MINUS in control:\n\t\t\tGPIO.output(23,0)\n\t\telif KEY_NUM_6 in control and KEY_PLUS in control:\n\t\t\tGPIO.output(24,1)\n\t\telif KEY_NUM_6 in control and KEY_MINUS in control:\n\t\t\tGPIO.output(24,0)\n\t\telif KEY_NUM_7 in control and KEY_PLUS in control:\n\t\t\tGPIO.output(25,1)\n\t\telif KEY_NUM_7 in control and KEY_MINUS in control:\n\t\t\tGPIO.output(25,0)\n\t\telif KEY_NUM_8 in control and KEY_PLUS in control:\n\t\t\tGPIO.output(18,1)\n\t\telif KEY_NUM_8 in control and KEY_MINUS in control:\n\t\t\tGPIO.output(18,0)\n\t\telif KEY_NUM_0 in control:\n\t\t\tfor i in pinlist:\n\t\t\t\tGPIO.output(i,0)\n\t\telif KEY_DEL_0 in control:\n\t\t\tbreak\n\t\telse:\n\t\t\tpass\n\texcept:\n\t\tpass\n\ttime.sleep(0.1)\nGPIO.cleanup()\n","sub_path":"HIDkeys/HIDgpio.py","file_name":"HIDgpio.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"276576948","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__:hp\n\n__mtime__ = '2020-04-01'\nfrom selenium import webdriver\nfrom time import sleep\n\"\"\"\n陈旧的元素引用:元素没有附加到页面文档\n查找元素是引用过期,页面刷新后,之前查找到的元素被更新了,导致元素不能正常使用\nselenium.common.exceptions.StaleElementReferenceException: Message: stale element reference: element is not attached to the page document\n (Session info: chrome=80.0.3987.122)\n\n\"\"\"\n\ndef deleteAllCourse():\n driver = webdriver.Chrome()\n driver.get(\"http://localhost:90/mgr/login/login.html\")\n driver.implicitly_wait(10)\n # driver.find_element_by_id(\"username\").click()\n # driver.find_element_by_id(\"password\").click()\n driver.find_element_by_class_name(\"btn.btn-success\").click()\n driver.implicitly_wait(2)\n while True:\n delete_buttons = driver.find_elements_by_xpath(\"//tbody/tr/td[4]/button[2]\")\n # while True:\n print(len(delete_buttons))\n if delete_buttons == []:\n print(\"删除完毕\")\n break\n sleep(2)\n delete_buttons[0].click()\n sleep(2)\n driver.find_element_by_class_name(\"btn.btn-primary\").click()\n sleep(2)\n\n driver.quit()\n\nif __name__ == '__main__':\n deleteAllCourse()\n","sub_path":"homework/robotTest/homework4/pylib/courseaction.py","file_name":"courseaction.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"461999610","text":"#!/usr/bin/python3\n\"\"\"\nTakes in a letter and sends a POST request to 8-json_api.py\nwith the letter as a parameter.\n\"\"\"\nif __name__ == \"__main__\":\n import requests\n from sys import argv\n\n if len(argv) == 1:\n dic = {'q': \"\"}\n else:\n dic = {'q': argv[1]}\n\n response = requests.post(url='http://0.0.0.0:5000/search_user', data=dic)\n d = response.json()\n if response.headers.get('content-type') != 'application/json':\n print('Not a valid JSON')\n elif d == {}:\n print('No result')\n else:\n print('[{}] {}'.format(d['id'], 
d['name']))\n","sub_path":"0x11-python-network_1/8-json_api.py","file_name":"8-json_api.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"571723568","text":"class Vozilo(object):\n\n def __init__(self, znamka, model, st_prevoz_km, zadnji_servis):\n\n self.znamka = znamka\n self.model = model\n self.st_prevoz_km = st_prevoz_km\n self.zadnji_servis = zadnji_servis\n\n def znamka_model(self):\n return self.znamka + \", \" + self.model\n\n\n\ndef seznam_vozil_(vozni_park):\n for index, vozilo in enumerate(vozni_park):\n print(\"ID: \", str(index))\n print(vozilo.znamka_model())\n print(\"Stevilo prevozenih kilometrov: \" + str(vozilo.st_prevoz_km))\n print(\"Datum zadnjega servisa: \" + vozilo.zadnji_servis)\n print(\"\")\n\n if not vozni_park:\n print(\"Nimas nobenega vozila v voznem parku.\")\n print(\"\")\n\ndef uredi_st_prevoz_km(vozni_park):\n print(\"Vnesi ID vozila, ki bi mu rad spremenil stevilo prevozenih kilometrov.\")\n for index, vozilo in enumerate(vozni_park):\n print(str(index) + \") \" + vozilo.znamka_model())\n\n print(\"\")\n if not vozni_park:\n print(\"Nimas nobenega vozila v voznem parku.\")\n print(\"\")\n\n else:\n id_vozila = int(input(\"Vnesite ID vozila:\"))\n vozilo = vozni_park[id_vozila]\n novo_st_km = float(input(\"Vnesite novo stevilo prevozenih kilometrov:\"))\n vozilo.st_prevoz_km = novo_st_km\n print(vozilo.znamka_model() + \" je bilo uspesno spremenjeno stevilo prevozenih kilometrov.\")\n\n\ndef uredi_zadnji_servis(vozni_park):\n print(\"Vnesi ID vozila, ki bi mu rad spremenil datum zadnjega servisa.\")\n for index, vozilo in enumerate(vozni_park):\n print(str(index) + \") \" + vozilo.znamka_model())\n\n print(\"\")\n if not vozni_park:\n print(\"Nimas nobenega vozila v voznem parku.\")\n print(\"\")\n else:\n\n id_vozila = int(input(\"Vnesite ID vozila?:\"))\n vozilo = vozni_park[id_vozila]\n nov_servis = input(\"Vnesite nov zadnji servis(primer zapisa: 04.03.2018):\")\n vozilo.zadnji_servis = nov_servis\n print(vozilo.znamka_model() + \" je bil usepsno spremenjen zadnji servis.\")\n\ndef dodati_novo_vozilo(vozni_park):\n znamka = input(\"Vnesi znamko vozila: \")\n model = input(\"Vnesi model vozila: \")\n st_prevoz_km = float(input(\"Vnesi stevilo prevozenih kilometrov: \"))\n zadnji_servis = input(\"Vnesi zadnji servis vozila(primer zapisa: 04.03.2018):\")\n\n novo_vozilo = Vozilo(znamka = znamka,model = model, st_prevoz_km = st_prevoz_km, zadnji_servis = zadnji_servis )\n vozni_park.append(novo_vozilo)\n\n print(\"\")\n print(novo_vozilo.znamka_model() + \" je bilo usepsno dodano v park vozil.\")\n\ndef izbrisi_vozilo(vozni_park):\n print(\"Vnesi ID vozila, ki bi ga rad izbrisal.\")\n for index, vozilo in enumerate(vozni_park):\n print(str(index) + \") \" + vozilo.znamka_model())\n\n print(\"\")\n\n if not vozni_park:\n print(\"Nimas nobenega vozila v voznem parku.\")\n print(\"\")\n else:\n\n id_vozila = int(input(\"Vnesite ID vozila?:\"))\n vozilo = vozni_park[id_vozila]\n vozni_park.remove(vozilo)\n print(\"Vozilo je bilo odstranjeno uspesno.\")\n\ndef main():\n print(\"Dobrodosli v voznem parku.\")\n\n #Dodajmo vozila v vozni park\n avto1 = Vozilo(znamka = \"Ford\", model = \"Focus\", st_prevoz_km = 60000, zadnji_servis = \"15.01.2018\")\n avto2 = Vozilo(znamka = \"Audi\", model = \"A4\", st_prevoz_km = 33000, zadnji_servis = \"12.12.2017\")\n avto3 = Vozilo(znamka = \"Citroen\", model = \"Berlingo\", st_prevoz_km = 150000, zadnji_servis = 
\"03.04.2018\")\n\n vozni_park = [avto1, avto2, avto3]\n\n while True:\n print(\"Prosim vnesite eno izmed teh moznosti\")\n print(\"1: Poglej vsa vozila\")\n print(\"2: Dodaj novo vozilo\")\n print(\"3: Uredi stevilo prevozenih km na vozilu\")\n print(\"4: Uredi zadnji servis vozila\")\n print(\"5: Izbrisi vozilo iz voznega parka\")\n print(\"6: Koncaj program\")\n\n izbira = input(\"Izberi moznost(1, 2, 3, 4, 5, 6):\")\n print(\"\")\n\n if izbira == \"1\":\n seznam_vozil_(vozni_park)\n elif izbira == \"2\":\n dodati_novo_vozilo(vozni_park)\n elif izbira == \"3\":\n uredi_st_prevoz_km(vozni_park)\n elif izbira == \"4\":\n uredi_zadnji_servis(vozni_park)\n elif izbira == \"5\":\n izbrisi_vozilo(vozni_park)\n elif izbira == \"6\":\n print(\"Hvala, ker ste uporabljali program za upravljanje sluzbenih vozil.\")\n break\n else:\n print(\"Oprostite ta moznost ne obstaja, poskusite se enkrat.\")\n continue\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n","sub_path":"upravljanje_sluz_vozil.py","file_name":"upravljanje_sluz_vozil.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"510953014","text":"\nimport pandas as pd\nimport numpy as np\nimport os\n\ndef to_drop_1(x):\n titles= [\"board member\", \"advisor\", \"board\", \"investor\", \"chairman\"\\\n , \"board of directors\", \"executive chairman\", \"investor\", \"angel\", \"angel investor\",\n 'board of advisors', \"board of advisory\", \"member board of directors\", \"board observer\",\n \"board director\", \"advisory board\", \"member\", \"board director\", \"investor and advisor\"]\n if 'advistor' in x:\n return True\n if \"advisory\" in x:\n return True\n if \"investor\" in x:\n return True\n if \"observer\" in x:\n return True\n for c in titles:\n if x==c:\n return True\n else:\n return False\n\ndef comps_worked_before(companies, relationships, founders):\n relationships = relationships.rename(columns={\"relationship_object_id\":\"id\"})\n relationships = relationships\n merged = companies.merge(relationships, how=\"left\", on=\"id\")\n merged.founded_at = pd.to_datetime(merged.founded_at)\n merged[\"to_drop\"] = merged.title.astype(str).map(lambda x: to_drop_1(x.lower()))\n merged = merged[merged.to_drop==False]\n merged = merged.sort_values(by=\"founder\", ascending=False).drop_duplicates([\"id\",\"person_object_id\"])\n\n # Number of companies worked before specific one\n tmp = merged.sort_values(by=[\"person_object_id\",\"founded_at\"]).groupby(\"person_object_id\").cumcount()\n tmp = pd.concat([merged, tmp], axis=1).sort_values(by=[\"person_object_id\",\"founded_at\"])\n tmp = tmp.rename(columns={0:\"worked_count\"})\n tmp.loc[tmp.person_object_id.isnull(),'worked_count']=np.nan\n tmp = tmp[tmp.founder==True]\n\n tmp = tmp[['id',\"worked_count\"]].groupby(\"id\",as_index=False).mean()\\\n .rename(columns={\"worked_count\":\"mean_comp_worked_before\"})\n print(tmp.head())\n companies = companies.merge(tmp, how=\"left\", on=\"id\")\n print(companies.head())\n\n return companies\n\n\nif __name__ == \"__main__\":\n companies = pd.read_csv(os.path.join('..',\"raw_data\",\"companies.csv\"))\n relationships = pd.read_csv(os.path.join('..',\"raw_data\",\"relationships.csv\"))\n founders = pd.read_csv(os.path.join('..',\"raw_data\",\"founders.csv\"))\n\n comps_worked_before(companies, relationships, 
founders)\n\n","sub_path":"invesscience/joanna_14.py","file_name":"joanna_14.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"496710038","text":"_author_ = 'TVJORNAL'\r\nprint(\"Qual é o codigo do produto? \")\r\nproduto1 = input()\r\n\r\nif produto1 == \"12345\":\r\n print(\"Banana Nanica\")\r\n Banana = 4.49\r\n bn = Banana\r\n n1 = Banana\r\n nee = print(\"O preço é {}\".format (Banana))\r\n\r\nprint(\"Qual é a forma de pagamento? \")\r\npagamento = input()\r\nif pagamento == \"d\":\r\n print(\"Efetue o pagamento\")\r\n print(\"Quanto foi dado? \")\r\n p1 = input(10.00)\r\n p1 = var = 10.00\r\n p2 = input(\"?\")\r\n if p2 == \"s\":\r\n n2 = Banana\r\n n3 = p1\r\n troco = p1 - Banana\r\n print(\"O troco é {}\".format(troco))\r\n print(\"Pagamento efetuado com sucesso!\")\r\nif pagamento == \"cc\":\r\n print(\"Efetue o Pagamento\")\r\n print(\"Quanto foi dado? \")\r\n p5 = input(4.49)\r\n p4 = var = 4.49\r\n p6 = input(\"?\")\r\n if p6 == \"s\":\r\n n2 = Banana\r\n n3 = p4\r\n troco2 = p4 - Banana\r\n print(\"O troco é {}\".format(troco2))","sub_path":"caixa.py","file_name":"caixa.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"216933666","text":"# -*- coding: utf-8 -*-\nfrom flask.ext.script import Manager, Server\nfrom app import app\nfrom app.command import command\n\nmanager = Manager(app)\nmanager.add_command(\"runserver\", Server(host=\"127.0.0.1\", port=\"8088\", use_debugger=True))\nmanager.add_command(\"custom\", command.CustomMangerService)\n\n\n@manager.option('-n', '--name', dest='name', default='liuzhi')\n@manager.option('-a', '--age', dest='age', default='liuzhi')\ndef hell(name, age):\n print(name+age)\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"612412606","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport urllib3\nurllib3.disable_warnings()\n\ndef get_soup(url) :\n\t\n\theaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0.2 Safari/604.4.7',\n\t}\t\n\n\tres = requests.get(url, headers=headers,verify=False)\t\n\t\n\tif res.status_code == 200 :\n\t\tsoup = bs(res.text,\"lxml\")\n\t\treturn soup\n\telse :\n\t\tmsg = \"Problem at get_soup\"\n\t\treturn \"request_error : \" + str(res.status_code)\n\n\n'''\nresult = requests.get(\"http://theverge.com\",verify=False)\nif result.status_code == 200:\n\tprint(result.status_code)\n\n\tc = result.content\n\n\tsoup = BeautifulSoup(c)\n\tsamples = soup.find_all(\"cell col span-1-2 alignBot right-col\")\n\tprint(samples)\n\nelse:\n\tprint(result.status_code)\n'''\n\nurl = 'https://projecteuler.net/countries'\nsoup = get_soup(url)\n\nprint(soup)\n'''\njobTitle = soup.find_all('div',attrs={'class' : 'jobTitle strong noMargTop margBotLg'})\n\ntitle_l = []\nfor title in jobTitle:\n\ttitle_l.append(title.text)\n\nlinks = soup.find_all('div',attrs={'class' : 'cell col span-1-2 noPadLt'})\n\nsalary = []\nfor link in links:\n\tif \"Median Base Salary\" in link.text:\n\t\t\n\t\tsalary.append(link.text)\n\nfor t in salary:\n\ttext = t.replace(\"Median Base Salary\",\"\")\n\ttt = 
text.replace(\",\",\"\")\n\tprint(tt[1:])\n\n\nprint(len(salary))\n'''","sub_path":"ProjectEuler/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"232351398","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Ed Mountjoy\n#\n\nimport sys\nimport os\nimport argparse\nimport pandas as pd\nfrom pprint import pprint\nfrom collections import OrderedDict\nfrom parquet_writer import write_parquet\n\ndef main():\n\n # Parse args\n args = parse_args()\n\n # Load json, only keep type == gwas\n credset = pd.read_json(args.inf, orient='records', lines=True)\n credset = credset.loc[credset['type'] == 'gwas', :]\n\n # Filter to remove rows not in a 95% credible set\n credset = credset.loc[credset.is95_credset == True, :]\n\n # Rename and select columns\n cols = OrderedDict([\n ('study_id', 'study_id'),\n ('lead_chrom', 'lead_chrom'),\n ('lead_pos', 'lead_pos'),\n ('lead_ref', 'lead_ref'),\n ('lead_alt', 'lead_alt'),\n ('tag_chrom', 'tag_chrom'),\n ('tag_pos', 'tag_pos'),\n ('tag_ref', 'tag_ref'),\n ('tag_alt', 'tag_alt'),\n ('logABF', 'log10_ABF'),\n ('postprob', 'posterior_prob')\n ])\n credset = ( credset.loc[:, list(cols.keys())]\n .rename(columns=cols) )\n\n # Coerce data types\n dtypes = OrderedDict([\n ('study_id', 'str'),\n ('lead_chrom', 'str'),\n ('lead_pos', 'Int64'),\n ('lead_ref', 'str'),\n ('lead_alt', 'str'),\n ('tag_chrom', 'str'),\n ('tag_pos', 'Int64'),\n ('tag_ref', 'str'),\n ('tag_alt', 'str'),\n ('log10_ABF', 'float64'),\n ('posterior_prob', 'float64')\n ])\n assert(set(dtypes.keys()) == set(credset.columns))\n credset = (\n credset.loc[:, dtypes.keys()]\n .astype(dtype=dtypes)\n )\n\n # Sort\n credset = credset.sort_values(\n ['study_id', 'lead_chrom', 'lead_pos', 'lead_ref', 'lead_alt',\n 'tag_chrom', 'tag_pos', 'tag_ref', 'tag_alt']\n )\n\n # Save as parquet\n write_parquet(credset,\n args.outf,\n compression='snappy',\n flavor='spark')\n\ndef parse_args():\n \"\"\" Load command line args \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--inf', metavar=\"\", help=('Credible set json'), type=str, required=True)\n parser.add_argument('--outf', metavar=\"\", help=(\"Output\"), type=str, required=True)\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"scripts/format_finemapping_table.py","file_name":"format_finemapping_table.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"369958747","text":"\"\"\"\r\nA module to use for validating user input \r\nis a float or integer within specified limits\r\n\"\"\"\r\n\r\n# an example python module\r\n# by Erin Coffey\r\n# 15 January 2018\r\n\r\ndef get_float(message, high, low=0):\r\n \"\"\"\r\n Takes a message from the caller and displays it in the console\r\n Takes a max and optional min value\r\n Accepts user input from console\r\n Returns a valid float value\r\n \"\"\"\r\n\r\n while True:\r\n try:\r\n floatValue = float(input(message))\r\n except ValueError:\r\n print (\"ERROR, Entry must be a number. Please try again.\")\r\n continue\r\n if floatValue <= low or floatValue > high:\r\n print (\"ERROR, Entry must be greater than \" + str(low) + \" and, less than or equal to \"\\\r\n + str(high) + \". 
Please try again.\")\r\n continue\r\n break\r\n return floatValue\r\n# end get_float\r\n\r\ndef get_int(message, high, low=0):\r\n \"\"\"\r\n Takes a message from the caller and displays it in the console\r\n Takes a max and optional min value\r\n Accepts user input from console\r\n Returns a valid integer value\r\n \"\"\"\r\n intValue = 1\r\n while True:\r\n try:\r\n intValue = int(input(message))\r\n except ValueError:\r\n print (\"ERROR, Entry must be a number. Please try again.\")\r\n continue\r\n if intValue <= low or intValue > high:\r\n print (\"ERROR, Entry must be greater than \" + str(low) + \" and, less than or equal to \"\\\r\n + str(high) + \". Please try again.\")\r\n continue\r\n break\r\n return intValue\r\n# end get_int()\r\n\r\n# use main for testing the functions in this module\r\ndef main():\r\n print (\"\\nTesting get_float('enter float:', 100, 0)\\n\\n\")\r\n myFloat = get_float(\"enter float:\\t\", 100, 0)\r\n print(\"The float value returned is \" + str(myFloat))\r\n\r\n print (\"\\nTesting get_int('enter int:', 100, 0)\\n\\n\")\r\n myInt = get_int(\"enter integer:\\t\", 100, 0)\r\n print(\"The integer value returned is \" + str(myInt))\r\n\r\n# if this is the main module, run the tests in main()\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"modules/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"278194534","text":"import numpy as np\n\nfrom kramersmoyal import km\nfrom kramersmoyal import kernels\n\ndef test_kmc():\n for t in [1,0.1,0.01,0.001]:\n for lag in [None, [1,2,3]]:\n\n X = np.random.normal(loc = 0, scale = np.sqrt(t), size = 10000)\n\n bins = np.array([5000])\n\n powers = np.array([[1], [2]])\n\n bw = 0.15\n\n # The kmc holds the results, where edges holds the binning space\n kmc, edges = km(X, kernel = kernels.epanechnikov, bw = bw,\n bins = bins, powers = powers)\n\n assert isinstance(kmc, np.ndarray)\n assert isinstance(edges[0], np.ndarray)\n\n kmc, edges = km(X, kernel = kernels.epanechnikov, bins = bins,\n powers = powers)\n\n assert isinstance(kmc, np.ndarray)\n assert isinstance(edges[0], np.ndarray)\n","sub_path":"test/kmc_test.py","file_name":"kmc_test.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"39383076","text":"from tkinter import *\r\n\r\n#开窗口\r\nwindow = Tk()\r\nwindow.title(\"Welcome to LikeGeeks app\")\r\nwindow.geometry('200x250')\r\n\r\n#Listbox列表框\r\nlbl = Label(window, text = \"A list of favourite countries...\")\r\nlistbox = Listbox(window)\r\nlistbox.insert(1,\"India\")\r\nlistbox.insert(2,\"USA\")\r\nlistbox.insert(3,\"Japan\")\r\nlistbox.insert(4,\"Austrelia\")\r\nbtn = Button(window, text = \"delete\", command = lambda listbox=listbox: listbox.delete(ANCHOR))\r\n\r\nlbl.pack()\r\nlistbox.pack()\r\nbtn.pack()\r\n\r\n#保持窗口\r\nwindow.mainloop()","sub_path":"Tkinter/t19.py","file_name":"t19.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"181796261","text":"import time\nimport subprocess\nimport random\n\ndef get_free_node(occupied_node):\n '''Возвращает название свободной ноды, на которую возможно отправить задачу'''\n\n# squeue = subprocess.run('ssh shipilov.ab@calc.cod.phystech.edu \"squeue\"',\n# capture_output=True, shell=True, check=True, text=True).stdout\n squeue = 
subprocess.run([\"ssh\", \"shipilov.ab@calc.cod.phystech.edu\", \"'squeue'\"],\n capture_output=True, check=True, text=True).stdout\n content=list(map(lambda x: x.split(), squeue.strip().split('\\n')))\n possible_nodes=list(map(lambda x: x[8], content))[1:]\n\n #удаляем ноды, которые ещё не выделены и у них в статусе указано \"(Priority)\"\n possible_nodes=list(filter(lambda element: element!='(Priority)', possible_nodes))\n\n number_of_processes = float('inf')\n\n while number_of_processes > 90:\n if set(occupied_node) == set(possible_nodes):\n occupied_node=[]\n# print('sleep & clean')\n time.sleep(30)\n\n node = random.choice(list(set(possible_nodes) - set(occupied_node)))\n# number_of_processes = len(subprocess.run(f'ssh shipilov.ab@calc.cod.phystech.edu \"ssh {node} ps aux | grep shipilov.ab\"'\n# , capture_output=True, shell=True, check=True, text=True).stdout.strip().split('\\n'))\n number_of_processes = len(subprocess.run([\"ssh\", \"shipilov.ab@calc.cod.phystech.edu\", \"ssh\", f\"{node}\", \"ps\", \"aux\", \"|\", \"grep\", \"shipilo\"]\n , capture_output=True, check=True, text=True).stdout.strip().split('\\n'))\n\n occupied_node.append(node)\n print(node, end='')\n return node\n\nget_free_node(occupied_node=[])\n","sub_path":"Submission/remote_free_nodes.py","file_name":"remote_free_nodes.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"231231020","text":"import pyNN.spiNNaker as p\nfrom matplotlib import pylab\n\np.setup(1.0)\n\n# p.set_number_of_neurons_per_core(p.SpikeSourcePoisson, 27)\n# p.set_number_of_neurons_per_core(p.IF_curr_exp, 22)\n\ninp = p.Population(100, p.SpikeSourcePoisson, {\"rate\": 100}, label=\"input\")\npop = p.Population(100, p.IF_curr_exp, {}, label=\"pop\")\n\np.Projection(inp, pop, p.OneToOneConnector(weights=5.0))\n\npop.record()\ninp.record()\n\np.run(100)\n\ninp.set(\"rate\", 10)\n# pop.set(\"cm\", 0.25)\npop.set(\"tau_syn_E\", 1)\n\np.run(100)\n\npop_spikes = pop.getSpikes()\ninp_spikes = inp.getSpikes()\n\npylab.subplot(2, 1, 1)\npylab.plot(inp_spikes[:, 1], inp_spikes[:, 0], \"r.\")\npylab.subplot(2, 1, 2)\npylab.plot(pop_spikes[:, 1], pop_spikes[:, 0], \"b.\")\npylab.show()\n\np.reset()\n\ninp.set(\"rate\", 0)\npop.set(\"i_offset\", 1.0)\npop.initialize(\"v\", p.RandomDistribution(\"uniform\", [-65.0, -55.0]))\n\np.run(100)\n\npop_spikes = pop.getSpikes()\ninp_spikes = inp.getSpikes()\n\npylab.subplot(2, 1, 1)\npylab.plot(inp_spikes[:, 1], inp_spikes[:, 0], \"r.\")\npylab.subplot(2, 1, 2)\npylab.plot(pop_spikes[:, 1], pop_spikes[:, 0], \"b.\")\npylab.show()\n\np.end()\n","sub_path":"integration_tests/change_neuron_parameters_between_runs/change_parameter_test.py","file_name":"change_parameter_test.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"90557819","text":"# Copyright (C) 2017 Catalyst IT Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport mock\n\nfrom distil.erp import utils as erp_utils\nfrom distil.db.sqlalchemy import api as db_api\nfrom distil.service.api.v2 import health\nfrom distil.tests.unit import base\n\n\nclass HealthTest(base.DistilWithDbTestCase):\n def setUp(self):\n super(HealthTest, self).setUp()\n erp_utils._ERP_DRIVER = None\n\n @mock.patch('distil.common.openstack.get_projects')\n def test_get_health_ok(self, mock_get_projects):\n mock_get_projects.return_value = [\n {'id': '111', 'name': 'project_1', 'description': ''},\n {'id': '222', 'name': 'project_2', 'description': ''},\n ]\n\n # Insert projects in the database.\n project_1_collect = datetime.utcnow() - timedelta(hours=1)\n db_api.project_add(\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': '',\n },\n project_1_collect\n )\n project_2_collect = datetime.utcnow() - timedelta(hours=2)\n db_api.project_add(\n {\n 'id': '222',\n 'name': 'project_2',\n 'description': '',\n },\n project_2_collect\n )\n\n ret = health.get_health()\n\n self.assertEqual('OK', ret['usage_collection'].get('status'))\n\n @mock.patch('distil.common.openstack.get_projects')\n def test_get_health_fail(self, mock_get_projects):\n mock_get_projects.return_value = [\n {'id': '111', 'name': 'project_1', 'description': ''},\n {'id': '222', 'name': 'project_2', 'description': ''},\n ]\n\n # Insert projects in the database.\n project_1_collect = datetime.utcnow() - timedelta(days=2)\n db_api.project_add(\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': '',\n },\n project_1_collect\n )\n project_2_collect = datetime.utcnow() - timedelta(hours=25)\n db_api.project_add(\n {\n 'id': '222',\n 'name': 'project_2',\n 'description': '',\n },\n project_2_collect\n )\n\n ret = health.get_health()\n projects = ret['usage_collection'].get('failed_projects')\n\n self.assertIsNotNone(projects)\n self.assertEqual(2, len(projects))\n self.assertEqual('FAIL', ret['usage_collection'].get('status'))\n self.assertIn('2', ret['usage_collection'].get('msg'))\n\n p_names = [p['name'] for p in projects]\n p_ids = [p['id'] for p in projects]\n\n self.assertEqual([\"project_1\", \"project_2\"], p_names)\n self.assertEqual([\"111\", \"222\"], p_ids)\n\n @mock.patch('odoorpc.ODOO')\n @mock.patch('distil.common.openstack.get_projects')\n def test_get_health_with_erp_backend_fail(self, mock_get_projects,\n mock_odoo):\n new = mock.MagicMock()\n new.db.list.side_effect = Exception('Boom!')\n mock_odoo.return_value = new\n # mock_odoo.side_effect = ValueError\n ret = health.get_health()\n\n self.assertEqual('FAIL', ret['erp_backend'].get('status'))\n\n @mock.patch('odoorpc.ODOO')\n @mock.patch('distil.common.openstack.get_projects')\n def test_get_health_with_erp_backend(self, mock_get_projects, mock_odoo):\n ret = health.get_health()\n\n self.assertEqual('OK', ret['erp_backend'].get('status'))\n\n @mock.patch('distil.common.openstack.get_projects')\n def test_get_health_with_ignore_tenants(self, mock_get_projects):\n self.override_config('collector', ignore_tenants=['project_2'])\n mock_get_projects.return_value = [\n {'id': '111', 'name': 'project_1', 'description': ''},\n {'id': '222', 'name': 'project_2', 'description': ''},\n ]\n\n # Insert projects in the database.\n project_1_collect = datetime.utcnow() - timedelta(days=2)\n db_api.project_add(\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': '',\n },\n 
project_1_collect\n )\n project_2_collect = datetime.utcnow() - timedelta(hours=25)\n db_api.project_add(\n {\n 'id': '222',\n 'name': 'project_2',\n 'description': '',\n },\n project_2_collect\n )\n\n ret = health.get_health()\n\n self.assertEqual('FAIL', ret['usage_collection'].get('status'))\n self.assertIn('1', ret['usage_collection'].get('msg'))\n\n","sub_path":"distil/tests/unit/service/api/v2/test_health.py","file_name":"test_health.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"147445603","text":"# -*- coding:utf-8 -*-\nimport os\nimport sys\nimport logging\nimport logging.handlers\n\nfrom ying.cfg import cfg\n\ndef create_logger(name, filename):\n root = logging.getLogger(name)\n FORMAT = '[%(levelname)-8s] [%(asctime)s] [%(name)s:%(lineno)d] %(message)s'\n DATE_FORMAT = '%Y-%m-%d %H:%M:%S'\n channel = logging.handlers.RotatingFileHandler(\n filename=filename,\n maxBytes=100000000,\n backupCount=10)\n channel.setFormatter(logging.Formatter(fmt=FORMAT, datefmt=DATE_FORMAT))\n root.addHandler(channel)\n\n console = logging.StreamHandler()\n console.setFormatter(logging.Formatter(fmt=FORMAT, datefmt=DATE_FORMAT))\n root.addHandler(console)\n\n root.setLevel(getattr(logging, cfg.log_level.upper(), logging.DEBUG))\n return logging.getLogger(name)\n\nloggers = {}\n\ndef getLogger(name):\n if name not in loggers:\n if not os.path.isdir(cfg.log_dir):\n os.makedirs(cfg.log_dir)\n loggers[name] = create_logger(name, os.path.join(cfg.log_dir, cfg.log_file))\n return loggers[name]\n","sub_path":"ying/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"530684207","text":"import socket\nimport time\nimport threading\nfrom queue import Queue\n\n#Author @inforkgodara\n\nsocket.setdefaulttimeout(0.25)\nlock = threading.Lock()\n\nip_address = input('IP Address: ')\nhost = socket.gethostbyname(ip_address)\nprint ('Scanning on IP Address: ', host)\n\ndef scan(port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n con = sock.connect((host, port))\n with lock:\n print(port, 'is open')\n con.close()\n except:\n pass\n\ndef execute():\n while True:\n worker = queue.get()\n scan(worker)\n queue.task_done()\n \nqueue = Queue()\nstart_time = time.time()\n \nfor x in range(100):\n thread = threading.Thread(target = execute)\n thread.daemon = True\n thread.start()\n \nfor worker in range(1, 500):\n queue.put(worker)\n \nqueue.join()\n\nprint('Time taken:', time.time() - start_time)","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"347976551","text":"# PyBank Challenge\n# Module for OS\nimport os\n\n# Module for subprocess/terminal output\n# import subprocess\n# with open(\"output.txt\", \"w+\") as output:\n# subprocess.call([\"python\", \"./main.py\"], stdout=output)\n\n# Module for reading CSV file\nimport csv\n\ncsvpath = os.path.join('Resources', 'budget_data.csv')\n\n# Improved Reading using CSV module\n\nwith open(csvpath, newline='') as csvfile:\n \n # CSV reader specifies delimiter and variable that holds contents\n csvreader = csv.reader(csvfile, delimiter=',')\n\n # Get total number of months\n total_months = 0\n total_net_amount = 0\n separated_month = []\n separated_amount = []\n for row in csvreader:\n 
separated_month.append(row[0])\n separated_amount.append(row[1])\n total_months += 1\n \n # Get total net amount of Profit/Loss\n \n separated_month.pop(0)\n separated_amount.pop(0)\n \n int_separated_amount = [int(x) for x in separated_amount]\n\n total_net_amount = sum(int_separated_amount)\n\n # Get the average change in Profit/Loss\n average_change = 0\n separated_month.pop(0)\n monthly_change_list = [int_separated_amount[i+1] - int_separated_amount[i] for i in range(len(int_separated_amount) -1)]\n average_change = sum(monthly_change_list)/len(monthly_change_list)\n \n # Get the greatest increase in profits (date and amount) over the entire period\n\n s_month, m_change_list = separated_month, monthly_change_list \n \n greatest_increase = max(m_change_list)\n greatest_increase_index = m_change_list.index(max(m_change_list))\n\n \n # Get the greatest decrease in losses (date and amount) over the entire period\n greatest_decrease = min(monthly_change_list)\n greatest_decrease_index = monthly_change_list.index(min(monthly_change_list))\n\n print(\"Financial Analysis\")\n print(\"-----------------------------\")\n print(\"Total Months: \" + str(total_months-1))\n print(\"Total: $\"+ str(total_net_amount))\n print(\"Average Change: $\" + str(round(average_change, 2)))\n print(\"Greatest Increase in Profits: \" + s_month[greatest_increase_index] + \" ($\" + str(greatest_increase) + \")\")\n print(\"Greatest Decrease in Profits: \" + s_month[greatest_decrease_index] + \" ($\" + str(greatest_decrease) + \")\")","sub_path":"PyBank_main.py","file_name":"PyBank_main.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"507232413","text":"from character import Character\nfrom monster import Dragon\nfrom monster import Goblin\nfrom monster import Troll\nimport os\n\nclass Game:\n def setup(self):\n self.player = Character()\n self.monsters = [\n Goblin(),\n Troll(),\n Dragon()\n ]\n self.monster = self.get_next_monster()\n\n def get_next_monster(self):\n try:\n return self.monsters.pop(0)\n except IndexError:\n return None\n\n def monster_turn(self):\n # Check to see if the monster attacks\n if self.monster.attack():\n # if so tell the player\n print(\"Watch out the monster is comming at you\")\n # check if the player wants to dodge\n if input(\"Try to dodge the attack Y/n? 
> \").lower() != 'n':\n # if so, see if the dodge is successfull\n if self.player.dodge():\n # if it is, move on\n print(\"Phew you dodged that one\")\n else:\n # if its not, remove 1 player hit point\n print(\"Gaaah you did'nt dodge fast enught and the moster hit you you lose 1 HP\")\n self.player.hit_points -= 1\n else:\n # if the monter isn't attack, tell that to the player too\n print(\"the monster aint attacking now's your chance\")\n\n\n def player_turn(self):\n # Let the player attack, rest, or quit\n choice = input(\"Options: [A]ttack, [R]est, [Q]uit\").lower()\n if choice in 'arq':\n # if they attack:\n if choice == 'a':\n # see if the attack is successfull\n if self.player.attack():\n # if so see if the monster dodges\n if not self.monster.dodge():\n if self.player.weapon == \"sword\":\n damage = 2\n elif self.player.weapon == \"axe\":\n damage = 3\n elif self.player.weapon == \"bow\":\n damage = 2\n else:\n print(\"Could'nt find your weapon please advice?\")\n # if not dodged subtract the right num off hit points from the monster\n self.monster.hit_points -= damage\n print(\"You hit the monster with a lethal blow and delt it {} HP Damage\".format(damage))\n else:\n # if dodged print that\n print(\"Aww your werent fast enugh the monster dodged your attack\")\n else:\n # if not a good attack, tell the player\n print(\"bad attack you did do shit!\")\n # if they rest:\n elif choice == 'r':\n print(\"as your resting on the cold ground you fell your energi surging up\")\n # call the player.rest() method\n self.player.rest()\n\n # if they quit, exit the game\n elif choice == 'q':\n exit()\n\n else:\n # if they pick anything else, re-run this method\n self.player_turn()\n\n\n def cleanup(self):\n # if the monster has no more hit points:\n if self.monster.hit_points < 1:\n # up the players experince\n self.player.experince += int(self.monster.experince / 2)\n # print a message\n print(\"You defeated the monster and got {} XP\".format(int(self.monster.experince / 2)))\n # get a new monster\n self.monster = self.get_next_monster()\n\n def __init__(self):\n self.setup()\n\n while self.player.hit_points and (self.monster or self.monsters):\n print('\\n'+'='*20)\n print(self.player)\n print(self.monster)\n self.monster_turn()\n print(\"-\"*20)\n self.player_turn()\n self.cleanup()\n print('\\n'+'='*20)\n\n if self.player.hit_points:\n print(\"You win!\")\n elif self.monsters or self.monster:\n print(\"You loose!\")\n","sub_path":"oop/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"132898320","text":"import pandas as pd\nimport os\n\ncurrent_path = os.path.abspath(os.path.dirname(__file__))\ncsv_path = os.path.join(current_path, 'book_utf8.csv')\nprint(csv_path)\n\ndf1 = pd.read_csv(csv_path)\n# print(df1)\n\nprint(\"*\"*30)\nprint(df1['还行'])","sub_path":"Week04/pdReadCSV.py","file_name":"pdReadCSV.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"169752965","text":"\"\"\"App URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. 
Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom rest_framework_simplejwt import views as jwt_views\r\nfrom django.conf.urls.static import static\r\nfrom django.conf import settings\r\nfrom rest_framework.documentation import include_docs_urls\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('api/users/', include('users.urls')),\r\n path('api/comments/', include('comments.urls')),\r\n path('api/restaurants/', include('restaurants.urls')),\r\n path('api/registration/', include('registration.urls')),\r\n path('api/authentication/', include('authentication.urls')),\r\n\r\n # API's URL generator\r\n path('api/docs/', include_docs_urls(title='Motion API', permission_classes=[])), # publicly visible\r\n\r\n # Auth\r\n path('api/token/', jwt_views.TokenObtainPairView.as_view(), name='token_obtain_pair'),\r\n path('api/token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),\r\n path('api/token/verify/', jwt_views.TokenVerifyView.as_view(), name='token_refresh'),\r\n]\r\n\r\nif settings.DEBUG:\r\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n","sub_path":"app/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"171169774","text":"import os\nimport time\nimport pytest\n\nfrom test.test_utils import CONTAINER_TESTS_PREFIX\n\nPT_PERFORMANCE_INFERENCE_SCRIPT = os.path.join(CONTAINER_TESTS_PREFIX, \"benchmark\", \"run_pytorch_inference_performance.py\")\nPT_PERFORMANCE_INFERENCE_CPU_CMD = f\"{PT_PERFORMANCE_INFERENCE_SCRIPT} --iterations 500\"\nPT_PERFORMANCE_INFERENCE_GPU_CMD = f\"{PT_PERFORMANCE_INFERENCE_SCRIPT} --iterations 1000 --gpu\"\n\n\n@pytest.mark.model(\"resnet18, VGG13, MobileNetV2, GoogleNet, DenseNet121, InceptionV3\")\n@pytest.mark.parametrize(\"ec2_instance_type\", [\"p3.16xlarge\"], indirect=True)\ndef test_performance_ec2_pytorch_inference_gpu(pytorch_inference, ec2_connection, region, gpu_only):\n ec2_performance_pytorch_inference(pytorch_inference, \"gpu\", ec2_connection, region, PT_PERFORMANCE_INFERENCE_GPU_CMD)\n\n\n@pytest.mark.model(\"resnet18, VGG13, MobileNetV2, GoogleNet, DenseNet121, InceptionV3\")\n@pytest.mark.parametrize(\"ec2_instance_type\", [\"c5.18xlarge\"], indirect=True)\ndef test_performance_ec2_pytorch_inference_cpu(pytorch_inference, ec2_connection, region, cpu_only):\n ec2_performance_pytorch_inference(pytorch_inference, \"cpu\", ec2_connection, region, PT_PERFORMANCE_INFERENCE_CPU_CMD)\n\n\ndef ec2_performance_pytorch_inference(image_uri, processor, ec2_connection, region, test_cmd):\n docker_cmd = \"nvidia-docker\" if processor == \"gpu\" else \"docker\"\n python_version = \"py2\" if \"py2\" in image_uri else \"py3\"\n container_test_local_dir = os.path.join(\"$HOME\", \"container_tests\")\n repo_name, image_tag = image_uri.split(\"/\")[-1].split(\":\")\n\n # Make sure we are logged into ECR so we can pull the image\n ec2_connection.run(f\"$(aws ecr get-login --no-include-email --region {region})\", hide=True)\n\n ec2_connection.run(f\"{docker_cmd} pull -q {image_uri} \")\n\n time_str = time.strftime('%Y-%m-%d-%H-%M-%S')\n commit_info = 
os.getenv(\"CODEBUILD_RESOLVED_SOURCE_VERSION\")\n # Run performance inference command, display benchmark results to console\n container_name = f\"{repo_name}-performance-{image_tag}-ec2\"\n log_file = f\"inference_benchmark_results_{commit_info}_{time_str}.log\"\n ec2_connection.run(\n f\"{docker_cmd} run -d --name {container_name} -e OMP_NUM_THREADS=1 \"\n f\"-v {container_test_local_dir}:{os.path.join(os.sep, 'test')} {image_uri} \"\n )\n ec2_connection.run(\n f\"{docker_cmd} exec {container_name} \"\n f\"python {test_cmd} \"\n f\"2>&1 | tee {log_file}\"\n )\n ec2_connection.run(\n f\"docker rm -f {container_name}\"\n )\n ec2_connection.run(\n f\"echo Benchmark Results: >&2;\"\n f\"echo PyTorch Inference {processor} {python_version} >&2\"\n )\n if python_version == \"py3\":\n ec2_connection.run(f\"tail -28 {log_file} >&2\")\n else:\n ec2_connection.run(f\"cat {log_file} >&2\")\n ec2_connection.run(\n f\"aws s3 cp {log_file} s3://dlinfra-dlc-cicd-performance/pytorch/ec2/inference/{processor}/{python_version}/{log_file}\"\n )\n ec2_connection.run(\n f\"echo To retrieve complete benchmark log, check s3://dlinfra-dlc-cicd-performance/pytorch/ec2/inference/{processor}/{python_version}/{log_file} >&2\"\n )\n","sub_path":"test/dlc_tests/benchmark/ec2/pytorch/inference/test_performance_pytorch_inference.py","file_name":"test_performance_pytorch_inference.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"562677530","text":"from beir import util\nfrom beir.datasets.data_loader import GenericDataLoader\nfrom beir.retrieval.evaluation import EvaluateRetrieval\nfrom beir.retrieval.search.dense import DenseRetrievalExactSearch as DRES\nimport logging\nfrom transformers import BertModel, BertTokenizerFast\nimport os\nimport torch\nfrom torch import nn, Tensor\n\nlogging.basicConfig(level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass DocumentEncoder(nn.Module):\n\n def __init__(self, len_of_token_embeddings: int, device: str, bert_model: str):\n super(DocumentEncoder, self).__init__()\n self.bert = BertModel.from_pretrained(bert_model).to(device)\n self.bert.resize_token_embeddings(len_of_token_embeddings)\n\n def forward(self, token_ids: Tensor, attention_masks: Tensor) -> Tensor:\n hidden_states, cls_tokens = self.bert(token_ids, attention_mask=attention_masks, return_dict=False)\n return cls_tokens\n\n @classmethod\n def from_pretrained(cls, path_to_statedict: str, tokenizer: BertTokenizerFast, device: str, bert_model: str) -> 'BiEncoder':\n document_encoder = cls(len_of_token_embeddings=len(tokenizer), device=device, bert_model=bert_model)\n document_encoder.load_state_dict(torch.load(path_to_statedict, map_location=device))\n return document_encoder\n\n\nclass BeirEval:\n def __init__(self, bert_model, dataset, output_dir, device, batch_size=128):\n self.dataset = dataset\n self.output_dir = output_dir\n self.batch_size = batch_size\n self.device = device\n self.bert_model = bert_model\n\n def evaluate_model(self):\n logger.info(f'starting evaluation dataset {self.dataset}')\n output_dir_data = f'{self.output_dir}/{self.dataset}'\n if os.path.isdir(output_dir_data):\n logger.info(f'dataset {self.dataset} already downloaded')\n data_path = f'{output_dir_data}/{self.dataset}'\n else:\n url = f'https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{self.dataset}.zip'\n data_path = util.download_and_unzip(url, output_dir_data)\n\n corpus, queries, qrels = 
GenericDataLoader(data_folder=data_path).load(split=\"test\")\n\n tokenizer = BertTokenizerFast.from_pretrained(self.bert_model, do_lower_case=('uncased' in bert_model))\n tokenizer.add_special_tokens({'additional_special_tokens': ['[ent]']})\n\n\n encoder_path = os.path.join(self.output_dir, 'encoder_mention.statedict')\n document_encoder = DocumentEncoder.from_pretrained(path_to_statedict=encoder_path, tokenizer=tokenizer,\n device=self.device, bert_model=self.bert_model)\n document_encoder.eval()\n\n logger.info(f'loading model with batch size {self.batch_size}')\n model = DRES(document_encoder, batch_size=self.batch_size)\n\n retriever = EvaluateRetrieval(model, score_function='cos_sim')\n results = retriever.retrieve(corpus, queries)\n\n ndcg, map, recall, precision = retriever.evaluate(qrels, results, retriever.k_values)\n\n logger.info(f'results: \\n ndcg: {ndcg} \\n map: {map} \\n recall: {recall} \\n precision: {precision}')\n\n\nif __name__ == '__main__':\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n dataset = os.getenv('DATASET', 'trec-covid')\n output_dir = os.getenv('OUTPUT_DIR', '/data')\n batch_size = int(os.getenv('BATCH_SIZE', '128'))\n bert_model = os.getenv('BERT_MODEL', 'bert-base-uncased')\n logger.info(f'using device format {device}')\n logger.info(f'configs for evaluation: \\n BERT_MODEL: {bert_model} \\n DATASET: {dataset} \\n OUTPUT_DIR: {output_dir} \\n BATCH_SIZE: {batch_size}')\n\n\n eval = BeirEval(bert_model, dataset, output_dir, device, batch_size)\n eval.evaluate_model()\n","sub_path":"src/beir_eval.py","file_name":"beir_eval.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"111618608","text":"#A PROGRAM NEM SZERETI A SPACE!!!\r\n\r\ndef split(word):\r\n return [char for char in word]\r\nfasz = \"\"\r\nanyad = []\r\nass = split(input())\r\ni = 0\r\ndef minusencrypt(text, s):\r\n result = \"\"\r\n\r\n\r\n\r\n# transverse the plain text\r\n for i in range(len(ass)):\r\n char = ass[i2]\r\n # Encrypt uppercase characters in plain text\r\n\r\n if (char.isupper()):\r\n result += chr((ord(char) + s - 65) % 26 + 65)\r\n # Encrypt lowercase characters in plain text\r\n else:\r\n result += chr((ord(char) + s - 97) % 26 + 97)\r\n\r\n return result\r\n# check the above function\r\ntext = ass[i]\r\ns = -6\r\n\r\ndef plusencrypt(text, s):\r\n result = \"\"\r\n\r\n\r\n# transverse the plain text\r\n for i in range(len(ass)):\r\n char = ass[i2]\r\n # Encrypt uppercase characters in plain text\r\n\r\n if (char.isupper()):\r\n result += chr((ord(char) + s - 65) % 26 + 65)\r\n # Encrypt lowercase characters in plain text\r\n else:\r\n result += chr((ord(char) + s - 97) % 26 + 97)\r\n\r\n return result\r\n# check the above function\r\ntext = ass[i]\r\ns = 6\r\n\r\ncode = input(\"kód(+-)\")\r\nlistcode = split(code)\r\nfor i in range(len(ass)):\r\n i2 = i\r\n text = ass[i]\r\n if listcode[i]==\"-\":\r\n anyad.append(minusencrypt(ass[i],-6))\r\n elif listcode[i]==\"+\":\r\n anyad.append(plusencrypt(ass[i],6))\r\n else:\r\n exit()\r\nfor i in range(len(anyad)):\r\n lofasz = anyad[i]\r\n nyomorek = lofasz[0]\r\n fasz = fasz+nyomorek\r\n\r\nprint(fasz)","sub_path":"suffelstuff/suffle.py","file_name":"suffle.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"350207322","text":"import argparse\nimport csv\nfrom contextlib import redirect_stdout\nimport os\nfrom 
pathlib import Path\nimport shutil\nimport struct\n\nfrom mkw.ppc_dis import disasm_iter, disassemble_callback\nfrom mkw.dol import DolBinary, Segment\n\n\nread_u32 = lambda f: struct.unpack(\">L\", f.read(4))[0]\nread_u16 = lambda f: struct.unpack(\">H\", f.read(2))[0]\nread_u8 = lambda f: struct.unpack(\">B\", f.read(1))[0]\n\n\ndef read_segments_iter(name):\n with open(name) as file:\n reader = csv.DictReader(file)\n for row in reader:\n yield row[\"name\"], Segment(int(row[\"start\"], 16), int(row[\"end\"], 16))\n\n\ndef read_segments(name):\n result = {}\n for name, segment in read_segments_iter(name):\n result[name] = segment\n return result\n\n\nclass Slice:\n def __init__(self, obj_file, segments):\n self.obj_file = obj_file\n self.segments = segments\n\n def __repr__(self):\n return \"Slice { %s, %u segs }\" % (self.obj_file, len(self.segments))\n\n\n# Limitation: slices must be ordered\ndef read_slices(name):\n lines = open(name).readlines()\n reader = csv.DictReader(lines)\n for row in reader:\n if not row.pop(\"enabled\"):\n continue\n\n name = row.pop(\"name\")\n segments = {}\n\n for cell, value in row.items():\n segment_attributes = [\"Start\", \"End\"]\n seg_name = \"\"\n seg_type = \"\"\n for attr in segment_attributes:\n if cell.endswith(attr):\n seg_type = attr\n seg_name = cell[: -len(attr)]\n assert seg_name and seg_type\n\n if not value:\n continue\n\n if not seg_name in segments:\n segments[seg_name] = Segment(0, 0)\n\n if seg_type == \"Start\":\n segments[seg_name].begin = int(value, 16)\n elif seg_type == \"End\":\n segments[seg_name].end = int(value, 16)\n\n print(\"#### %s %s\" % (name, segments))\n yield Slice(name, segments)\n\n\ndef get_asm_path(name, gap, folder):\n folder.mkdir(exist_ok=True)\n return folder / (\"%s_%s.s\" % (name, hex(gap.begin)[2:]))\n\n\ndef format_segname(name):\n if \"extab\" in name:\n return name + \"_\"\n return \".\" + name\n\n\ndef read_u32b(filecontent, offset):\n return (\n (filecontent[offset + 0] << 24)\n | (filecontent[offset + 1] << 16)\n | (filecontent[offset + 2] << 8)\n | filecontent[offset + 3]\n )\n\n\n# stdout must be redirected\ndef dump_bss(size):\n print(\".skip 0x%x\" % size)\n\n\n# stdout must be redirected\ndef dump_data(image, addr_start, seg):\n for i in range(seg.begin, seg.end, 4):\n if seg.end - i >= 4:\n print(\".4byte 0x%08X\" % read_u32b(image, i - addr_start))\n continue\n\n for j in range(i, seg.end):\n print(\".byte 0x%02x\" % image[j - addr_start])\n\n\n# stdout must be redirected\ndef dump_text(image, addr_start, seg):\n disasm_iter(\n image, seg.begin - addr_start, seg.begin, seg.size(), disassemble_callback\n )\n\n\ndef compute_perm(name):\n perm = \"wa\"\n if name == \"text\" or name == \"init\":\n perm = \"ax\"\n\n # if \"bss\" in name:\n # perm = \"ba\"\n\n if name == \"rodata\" or \"2\" in name:\n perm = perm.replace(\"w\", \"\")\n\n return perm\n\n\n# stdout must be redirected\ndef dump_section_body(name, image, addr_start, seg):\n if \"bss\" in name:\n dump_bss(seg.size())\n return\n\n if name == \"text\" or name == \"init\":\n dump_text(image, addr_start, seg)\n return\n\n dump_data(image, addr_start, seg)\n\n\n# stdout must be redirected\ndef dump_section_header(name, seg):\n # section permissions\n perm = compute_perm(name)\n\n print(\n '\\n.section %s, \"%s\" # 0x%08X - 0x%08X'\n % (format_segname(name), perm, seg.begin, seg.end)\n )\n\n\n# stdout must be redirected\ndef dump_section(name, image, addr_start, seg):\n dump_section_header(name, seg)\n dump_section_body(name, image, 
addr_start, seg)\n\n\n# stdout must be redirected\ndef dump_object_file(image, addr_start, segments):\n print('\\n.include \"macros.inc\"')\n\n for segment_name, segment in segments:\n dump_section(segment_name, image, addr_start, segment)\n\n\ndef disassemble_object_file(path, image, addr_start, segments):\n with open(path, \"w\") as file:\n with redirect_stdout(file):\n dump_object_file(image, addr_start, segments)\n\n\ndef disasm(folder, name, image, addr_start, seg, is_data):\n path = get_asm_path(name, seg, folder)\n\n disassemble_object_file(path, image, addr_start, [(name, seg)])\n\n\ndef gen_start_segs(segments):\n # Start segs:\n # ['text']: (0, 0x8...)\n start_seg = {}\n for name, seg in segments.items():\n start_seg[name] = Segment(0, seg.begin)\n\n return start_seg\n\n\ndef gen_end_segs(segments):\n # End segs:\n # ['text']: (0x8..., 0)\n end_seg = {}\n for name, seg in segments.items():\n end_seg[name] = Segment(seg.end, 0)\n\n return end_seg\n\n\ndef find_gaps(all_slices):\n last_segments = all_slices[0].segments\n\n # [1:] to skip initial (previously start_seg)\n for slice_obj in all_slices[1:]:\n obj_file = slice_obj.obj_file\n slice = slice_obj.segments\n for name, segment in slice.items():\n if last_segments[name].end != segment.begin:\n # There's a gap!\n\n print(\n \"[.%s] Gap from %x to %x\"\n % (name, last_segments[name].end, segment.begin)\n )\n yield name, Segment(last_segments[name].end, segment.begin)\n\n last_segments[name] = segment\n if not obj_file.startswith(\"#\"):\n yield obj_file, None\n\n\ndef find_o_files(all_slices, folder):\n \"\"\"Returns all paths to object files that will assemble the binary.\"\"\"\n for name, gap_seg in find_gaps(all_slices):\n if gap_seg is None:\n yield name, gap_seg, \"??\"\n continue\n path = get_asm_path(name, gap_seg, folder)\n print(path)\n path.stem.replace(\".s\", \".o\")\n yield name, gap_seg, path\n\n\ndef unpack_binary(folder, all_slices, image, addr_start):\n for name, gap_seg, dest in find_o_files(all_slices, folder):\n is_decompiled = gap_seg is None\n\n if not is_decompiled:\n # print(\"name %s dest %s\" % (name, dest))\n disasm(folder, name, image, addr_start, gap_seg, False)\n yield dest\n\n if is_decompiled:\n yield name.replace(\".cpp\", \".o\").replace(\".c\", \".o\")\n\n\ndef compute_end_cap(segments):\n # Final 0x8 -> 0x8; second part ignored\n end_seg = gen_end_segs(segments)\n\n end_slice = Slice(\"# 0x80 [finish] -> 0x80 [ignored]\", end_seg)\n\n return end_slice\n\n\ndef compute_begin_cap(segments):\n # Final 0x8 -> 0x8; second part ignored\n start_seg = gen_start_segs(segments)\n\n start_slice = Slice(\"# 0 [ignored] -> 0x80 [start]\", start_seg)\n\n return start_slice\n\n\ndef gen_cuts(slices, segments):\n # Initial 0 -> 0x8; first part ignored\n\n start_slice = compute_begin_cap(segments)\n end_slice = compute_end_cap(segments)\n\n return [start_slice] + slices + [end_slice]\n\n\ndef compute_cuts_from_spreadsheets(segments, decomplog):\n # segments: binary descriptor, .text: 0x8..0x8\n # decomplog: slices, what decompiled code replaces\n\n slices = list(read_slices(decomplog))\n segments = read_segments(segments)\n\n return slices, segments, gen_cuts(slices, segments)\n\n\ndef unpack_base_dol(asm_dir, pack_dir, binary_dir):\n base_dol = DolBinary(binary_dir / \"main.dol\")\n\n _, _, cuts = compute_cuts_from_spreadsheets(\n pack_dir / \"dol_segments.csv\",\n pack_dir / \"dol_slices.csv\",\n )\n\n # o_files\n return list(unpack_binary(asm_dir / \"dol\", cuts, base_dol.image, base_dol.image_base))\n\n\n## 
REL\n\n\ndef load_rel_binary(segments, binary_dir) -> (bytearray, int):\n print(segments)\n max_vaddr = max(segments[seg].end for seg in segments)\n image_base = 0x80000000\n image = bytearray(max_vaddr - image_base)\n\n rel_segment_dir = binary_dir / \"rel\"\n for segment in segments:\n rel_segment_path = rel_segment_dir / (segment + \".bin\")\n with open(rel_segment_path, \"rb\") as file:\n data = file.read()\n\n segment_data = segments[segment]\n\n start = segment_data.begin\n end = segment_data.end\n\n data_len = len(data) # virtual\n\n for i in range(start, end):\n # try:\n # x = data[i - start]\n # except:\n # print(segment, hex(i), hex(start), hex(end),i - start, len(data))\n # print(end - (start + len(data)))\n\n # Hack for alignment (miss by 16)\n if i - start >= data_len:\n continue\n image[i - image_base] = data[i - start]\n\n return image, image_base\n\n\ndef unpack_staticr_rel(asm_dir, pack_dir, binary_dir):\n _, segments, cuts = compute_cuts_from_spreadsheets(\n pack_dir / \"rel_segments.csv\",\n pack_dir / \"rel_slices.csv\",\n )\n\n image, image_base = load_rel_binary(segments, binary_dir)\n\n # o_files\n return list(unpack_binary(asm_dir / \"rel\", cuts, image, image_base))\n\n\ndef unpack_everything(asm_dir, pack_dir, binary_dir):\n \"\"\"Unpack all ASM blobs into asm_dir.\"\"\"\n dol_o_files = unpack_base_dol(asm_dir, pack_dir, binary_dir)\n with open(pack_dir / \"dol_objects.txt\", \"w\") as file:\n for path in dol_o_files:\n print(path, file=file)\n rel_o_files = unpack_staticr_rel(asm_dir, pack_dir, binary_dir)\n with open(pack_dir / \"rel_objects.txt\", \"w\") as file:\n for path in rel_o_files:\n print(path, file=file)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Generate ASM blobs and linker object lists.\")\n parser.add_argument(\"--asm_dir\", type=Path, required=True, help=\"Path to ASM dir\")\n parser.add_argument(\"--pack_dir\", type=Path, required=True, help=\"Path to link instructions dir\")\n parser.add_argument(\"--binary_dir\", type=Path, required=True, help=\"Binary containing main.dol and StaticR.rel\")\n args = parser.parse_args()\n\n # Recreate the ASM dir.\n if os.path.exists(args.asm_dir / \"dol\"):\n shutil.rmtree(args.asm_dir / \"dol\")\n if os.path.exists(args.asm_dir / \"rel\"):\n shutil.rmtree(args.asm_dir / \"rel\")\n args.asm_dir.mkdir(exist_ok=True)\n\n # Write the macros file.\n with open(args.asm_dir / \"macros.inc\", \"w\") as file:\n file.write(\"# PowerPC Register Constants\\n\")\n for i in range(0, 32):\n file.write(\".set r%i, %i\\n\" % (i, i))\n for i in range(0, 32):\n file.write(\".set f%i, %i\\n\" % (i, i))\n for i in range(0, 8):\n file.write(\".set qr%i, %i\\n\" % (i, i))\n\n unpack_everything(args.asm_dir, args.pack_dir, args.binary_dir)\n","sub_path":"util/gen_asm.py","file_name":"gen_asm.py","file_ext":"py","file_size_in_byte":10875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"196409609","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nfrom base_utils import print_under\n\n# From Positional to Keyword-Only Parameters\nprint_under('From Positional to Keyword-Only Parameters 必须关键字参数')\n\n\ndef tag(name, *content, cls=None, **attrs):\n if cls is not None:\n attrs['class'] = cls\n if attrs:\n attrs_str = ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs.items())\n else:\n attrs_str = ''\n if content:\n return '\\n'.join('<%s%s>%s' % (name, attrs_str, c, name) for c in content)\n else:\n return '<%s%s />' % (name, 
attrs_str)\n\nprint(tag('html'))\nprint(tag('body', 'test_1', 'test_2'))\nprint(tag('div', cls='sidebar'))\nprint(tag('span', 'test_3', cls='sidebar', title='test_3'))\n\nmy_tag = {'name': 'img', 'src': 'sunset.jpg', 'cls': 'ml20'}\nprint(tag(**my_tag))\n\n# 强制关键字参数 in py3\nprint_under('强制关键字参数 in py3')\n\n\ndef f(name, *, age):\n print('%s is %s years old.' % (name, age))\n\ntry:\n f('Wen Jiang', 23)\nexcept TypeError as e:\n print(e) # f() takes 1 positional argument but 2 were given\n\nf('Wen Jiang', age=23) # Wen Jiang is 23 years old.\n","sub_path":"chap5 Functions as Objects/5.7.py","file_name":"5.7.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"625718967","text":"from openpyxl import Workbook\nfrom openpyxl import load_workbook\nimport sys\n\ndef get_data(ro,col,sheet):\n return sheet.cell(row = ro, column = col).value\n\n#main\n\n#load the excel\nwork_b = load_workbook(filename='_gaussian.xlsx') #here is the name of your xlsx file\nsheetnames = work_b.get_sheet_names()\nsheet = work_b.get_sheet_by_name(sheetnames[0])\n\nwb = Workbook() # Creat sheet\nws = wb.active\n\nnum = 0\nprolist1 = []\nprolist2 = []\nr_row = 1\nfor row in range(1,500000+1):\n\n if get_data(row, 1, sheet) == \"stop_here\":\n break\n if get_data(row, 1, sheet) == \"ACP\":\n num += 1\n ws['A' + str(r_row)] = get_data(row - 1, 1, sheet)\n ws['B' + str(r_row)] = str(prolist1)\n ws['C' + str(r_row)] = str(prolist2)\n r_row += 1\n prolist1 = []\n prolist2 = []\n else:\n prolist1.append(get_data(row,2,sheet))\n prolist2.append(get_data(row,3,sheet))\n\nwb.save(\"_tomatlab\"+sys.argv[1]+\".xlsx\")\n","sub_path":"Assignment02/Gaussian_extraction/step7_tomatlab.py","file_name":"step7_tomatlab.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"92739308","text":"# -*- coding: utf-8 -*-\nfrom xlwt import Workbook, easyxf\n\nfrom django.http import HttpResponse\n\nfrom model_report import arial10\nfrom .base import Exporter\n\n\nclass FitSheetWrapper(object):\n \"\"\"Try to fit columns to max size of any entry.\n To use, wrap this around a worksheet returned from the\n workbook's add_sheet method, like follows:\n\n sheet = FitSheetWrapper(book.add_sheet(sheet_name))\n\n The worksheet interface remains the same: this is a drop-in wrapper\n for auto-sizing columns.\n \"\"\"\n def __init__(self, sheet):\n self.sheet = sheet\n self.widths = dict()\n self.heights = dict()\n\n def write(self, r, c, label='', *args, **kwargs):\n self.sheet.write(r, c, label, *args, **kwargs)\n self.sheet.row(r).collapse = True\n bold = False\n if args:\n style = args[0]\n bold = str(style.font.bold) in ('1', 'true', 'True')\n width = int(arial10.fitwidth(label, bold))\n if width > self.widths.get(c, 0):\n self.widths[c] = width\n self.sheet.col(c).width = width\n\n height = int(arial10.fitheight(label, bold))\n if height > self.heights.get(r, 0):\n self.heights[r] = height\n self.sheet.row(r).height = height\n\n def __getattr__(self, attr):\n return getattr(self.sheet, attr)\n\n\nclass ExcelExporter(Exporter):\n\n def write_rows(self, column_labels, report_rows, report_inlines=None):\n\n if not report_rows or report_rows[0][0]:\n # FIXME: [0][0] is None when real data. 
Is this reliable?\n return\n\n for index, x in enumerate(column_labels):\n self.sheet1.write(self.row_index, index, u'%s' % x, self.stylebold)\n self.row_index += 1\n for g, rows in report_rows:\n if g:\n self.sheet1.write(self.row_index, 0, u'%s' % g, self.stylebold)\n self.row_index += 1\n for row in list(rows):\n if row.is_value():\n for index, x in enumerate(row):\n if isinstance(x.value, (list, tuple)):\n xvalue = ''.join(['%s\\n' % v for v in x.value])\n else:\n xvalue = x.text()\n self.sheet1.write(self.row_index, index, xvalue, self.stylevalue)\n self.row_index += 1\n\n if report_inlines:\n for inline in report_inlines:\n\n inline_context = inline.get_render_context({}, by_row=row)\n self.write_rows(inline_context['column_labels'], inline_context['report_rows'])\n\n elif row.is_caption:\n for index, x in enumerate(row):\n if not isinstance(x, (unicode, str)):\n self.sheet1.write(self.row_index, index, x.text(), self.stylebold)\n else:\n self.sheet1.write(self.row_index, index, x, self.stylebold)\n self.row_index += 1\n elif row.is_total:\n for index, x in enumerate(row):\n self.sheet1.write(self.row_index, index, x.text(), self.stylebold)\n self.sheet1.write(self.row_index + 1, index, ' ')\n self.row_index += 2\n\n\n def render(self, report, column_labels, report_rows, report_inlines):\n self.row_index = 0\n self.sheet1 = FitSheetWrapper(self.book.add_sheet(report.get_title()[:20]))\n self.write_rows(column_labels, report_rows, report_inlines)\n\n response = HttpResponse(content_type=\"application/ms-excel\")\n response['Content-Disposition'] = 'attachment; filename=%s.xls' % report.slug\n self.book.save(response)\n return response\n\n def __init__(self):\n self.stylebold = easyxf('font: bold true; alignment:')\n self.stylevalue = easyxf('alignment: horizontal left, vertical top;')\n self.book = Workbook(encoding='utf-8')\n","sub_path":"model_report/exporters/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"245504788","text":"# 3078.py\n# 2018.06.17\n\nimport sys\nimport collections\n\nr = sys.stdin.readline\n\nn, k = map(int, r().split())\np = [collections.deque() for _ in range(21)]\ncnt = 0\nfor idx in range(n):\n\ti = len(r().rstrip())\n\twhile p[i] and p[i][0] < idx-k:\n\t\tp[i].popleft()\n\tcnt += len(p[i])\n\tp[i].append(idx)\nprint(cnt)\n\n# 이름 길이를 기준으로 하여 각각의 queue를 만들어 index값을 push한다.\n# 뒤에 사람을 기준으로 친구 쌍을 count하며, 친구의 범위가 넘는 경우는 popleft하여 다음번에 탐색하지 않게한다.\n","sub_path":"3000/3078.py","file_name":"3078.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"64146019","text":"import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output, State, MATCH, ALL\r\nimport dash_bootstrap_components as dbc\r\nfrom .plots import App, plot_graph, add_parameters, _params\r\nfrom .homepage import Homepage\r\nfrom jupyter_dash import JupyterDash\r\nimport pandas as pd\r\nimport logging\r\nimport plotly.graph_objects as go\r\nfrom .data_exploration import dataexploration, plot_distributions ,association,get_pps_array,get_corr_array\r\n\r\n# just set the width to 100% is enough ;) you cannot get the browser size with your old server side width checking\r\nWIDTH = \"100%\"\r\n\r\nadded_params_value=[]\r\nexternal_stylesheets = [dbc.themes.BOOTSTRAP,{\r\n 'href': 
'https://use.fontawesome.com/releases/v5.8.1/css/all.css',\r\n 'rel': 'stylesheet',\r\n 'integrity': 'sha384-50oBUHEmvpQ+1lW4y57PTFmhCaXp0ML5d60M1M7uH2+nqUivzIebhndOJK28anvf',\r\n 'crossorigin': 'anonymous'\r\n}]\r\n\r\n\r\n\r\ndef run_app(df, host=\"0.0.0.0\", port=12345):\r\n app = JupyterDash(__name__, external_stylesheets=external_stylesheets,suppress_callback_exceptions=True)\r\n app.config.suppress_callback_exceptions = True\r\n app.layout = html.Div([\r\n dcc.Location(id = 'url', refresh = False),\r\n html.Div(id = 'page-content')])\r\n try:\r\n @app.callback(Output('page-content', 'children'),[Input('url', 'pathname')])\r\n def display_page(pathname):\r\n if pathname == '/plot':\r\n return App(df)\r\n elif pathname == '/data-exploration':\r\n return dataexploration(df)\r\n else:\r\n return Homepage(df)\r\n\r\n @app.callback(Output('hist_plot','children'),[Input('hist_col_dropdown','value'),Input('theme_dropdown','value')])\r\n def update_data_distribution(col_list,theme):\r\n children = plot_distributions(df,col_list,theme)\r\n return children\r\n \r\n @app.callback([Output('corr','children'),Output('heatmap','style')],\r\n [Input('col1','value'),Input('col2','value'),Input('show-more','n_clicks')])\r\n def update_association(col1,col2,n):\r\n heat_map_style={'display':'none'}\r\n try:\r\n corr_child=association(df,col1,col2)\r\n except (TypeError):\r\n corr_child=[html.P('Please select numeric columns', style={'color':'red'})]\r\n if n is not None:\r\n if n%2==1:\r\n heat_map_style=_params()\r\n\r\n return corr_child,heat_map_style\r\n\r\n @app.callback([Output('output_plots','children'),Output('add-parameter-drop','options'),Output('color_div','style'),\r\n Output('facet_col_div','style'),Output('margin_x_div','style'),Output('margin_y_div','style'),Output('trendline_div','style'),\r\n Output('size_div','style'),Output('animation_div','style'),Output('opacity_div','style'),Output('barmode_div','style'),\r\n Output('boxmode_div','style'),Output('q_div','style'),Output('points_div','style')],\r\n [Input('charttype','value'), Input('xaxis','value'), Input('yaxis','value'), Input('theme_dropdown','value'), \r\n Input('add-parameter-drop','value'),Input('color','value'),Input('facet_col','value'),Input('margin-x','value'),\r\n Input('margin-y','value'),Input('trendline','value'),Input('size','value'),Input('animation','value'),Input('opacity','value'),\r\n Input('barmode','value'), Input('boxmode','value'),Input('q','value'),Input('points','value')])\r\n def update_plots(chart_type,x,y,theme,added_params,color,facet_col,margin_x,margin_y,trendline,size,animation,opacity,barmode,boxmode,q,points):\r\n color_style = {'display': 'none'}\r\n facet_col_style = {'display': 'none'}\r\n margin_x_style = {'display': 'none'}\r\n margin_y_style = {'display': 'none'}\r\n trendline_style={'display':'none'}\r\n size_style = {'display':'none'}\r\n animation_style = {'display':'none'}\r\n opacity_style={'display': 'none'}\r\n barmode_style = {'display': 'none'}\r\n boxmode_style = {'display': 'none'}\r\n q_style={'display': 'none'}\r\n points_style={'display': 'none'}\r\n\r\n facet_col_val,color_val, margin_x_val,margin_y_val,trendline_val,size_val,animation_val,opacity_val,barmode_val,boxmode_val,q_val,points_val,notched_val=None,None,None,None,None,None,None,1,'relative','group','linear','outliers',False\r\n box_val=False\r\n log_x = False\r\n log_y = False\r\n for param in added_params:\r\n if param == 'log_x':\r\n log_x=True\r\n if param=='log_y':\r\n log_y=True\r\n if param=='color':\r\n 
color_style = _params()\r\n color_val=color\r\n if param=='facet_col':\r\n facet_col_style = _params()\r\n facet_col_val=facet_col\r\n if param == 'marginal_x':\r\n margin_x_style= _params()\r\n margin_x_val = margin_x\r\n if param == 'marginal_y':\r\n margin_y_style=_params()\r\n margin_y_val=margin_y\r\n if param=='trendline':\r\n trendline_style=_params()\r\n trendline_val=trendline\r\n if param=='size':\r\n size_style = _params()\r\n size_val=size\r\n if param == 'animation_frame':\r\n animation_style=_params()\r\n animation_val = animation\r\n if param == 'opacity':\r\n opacity_style=_params()\r\n opacity_val=opacity\r\n\r\n if param == 'barmode':\r\n barmode_style=_params()\r\n barmode_val=barmode\r\n\r\n if param == 'mode':\r\n boxmode_style=_params()\r\n boxmode_val=boxmode\r\n if param == 'quartilemethod':\r\n q_style=_params()\r\n q_val=q\r\n if param == 'points':\r\n points_style=_params()\r\n points_val=points\r\n if param == 'notched':\r\n notched_val=True\r\n if param == 'box':\r\n box_val=True\r\n options = add_parameters(chart_type)\r\n plot_children = plot_graph(plot_type=chart_type,df=df,x=x,y=y,theme=theme,color=color_val,facet_col=facet_col_val,\r\n marginal_x=margin_x_val,marginal_y=margin_y_val,trendline=trendline_val,log_x=log_x,log_y=log_y,size=size_val,\r\n animation_frame =animation_val,opacity=opacity_val,barmode=barmode_val,boxmode=boxmode_val,\r\n quartilemethod=q_val,points=points_val,notched=notched_val,box=box_val)\r\n \r\n return plot_children, options, color_style, facet_col_style , margin_x_style, margin_y_style, trendline_style , size_style ,animation_style, opacity_style, barmode_style, boxmode_style,q_style,points_style \r\n\r\n app.run_server(mode='inline',width=WIDTH,host=host,port=port)\r\n except:\r\n app.run_server(mode='inline',width=WIDTH,host=host,port=port)\r\n\r\n\r\n","sub_path":"autoplotter/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"130214509","text":"from random import choice\n\n# Problem 1\ndef arithmagic():\n step_1 = input(\"Enter a 3-digit number where the first and last \"\n \"digits differ by 2 or more: \")\n if int(step_1) > 999 or int(step_1) < 100:\n raise ValueError(\"Did not enter a 3-digit number\")\n if abs(int(step_1[0]) - int(step_1[2])) < 2:\n raise ValueError(\"First and last digits do not differ by 2 or more\")\n\n step_2 = input(\"Enter the reverse of the first number, obtained \"\n \"by reading it backwards: \")\n if int(step_2[0]) != int(step_1[2]) or int(step_2[1]) != int(step_1[1]) \\\n or int(step_2[2]) != int(step_1[0]):\n raise ValueError(\"Did not enter the reverse of the first number\")\n\n step_3 = input(\"Enter the positive difference of these numbers: \")\n if int(step_3) != abs(int(step_1) - int(step_2)):\n raise ValueError(\"Did not enter the positive difference\")\n\n step_4 = input(\"Enter the reverse of the previous result: \")\n if int(step_4[0]) != int(step_3[2]) or int(step_4[1]) != int(step_3[1]) \\\n or int(step_4[2]) != int(step_3[0]):\n raise ValueError(\"Did not enter the reverse of the previous result\")\n\n print (step_3 + \"+\" + step_4 + \"= 1089 (ta-da!)\")\n\narithmagic()\n\n# Problem 2\ndef random_walk(max_iters=1e12):\n walk = 0\n direction = [1,-1]\n for i in range(int(max_iters)):\n try:\n walk += choice(direction)\n except KeyboardInterrupt:\n print(\"Process interrupted at iteration \" + str(i))\n return walk\n print(\"Process completed\")\n return 
walk\n\nprint(random_walk())\n\n# Problem 3 and 4\nclass ContentFilter(object):\n def __init__(self,name):\n try:\n if not isinstance(name,str):\n raise TypeError\n except TypeError:\n print(\"TypeError: File name not a string\")\n else:\n self.name = name\n with open(name,'a') as file:\n file.write('')\n with open(name,'r') as file2:\n self.contents = file2.read()\n\n def uniform(self,name_to,mode='w',case='upper'):\n if mode!='w' and mode !='a':\n raise ValueError(\"Invalid mode. Must be 'w' or 'a'\")\n if case!='upper' and case !='lower':\n raise ValueError(\"Invalid case. Must be 'upper' or 'lower'\")\n if case == 'upper':\n with open(name_to,mode) as out_file:\n out_file.write(self.contents.upper())\n if case == 'lower':\n with open(name_to,mode) as out_file:\n out_file.write(self.contents.lower())\n\n def reverse(self,name_to,mode='w',unit='line'):\n if mode!='w' and mode !='a':\n raise ValueError(\"Invalid mode. Must be 'w' or 'a'\")\n if unit!='line' and unit!='word':\n raise ValueError(\"Invalid reverse style. Must be 'word' or 'line'\")\n if unit=='word':\n lines = self.contents.split('\\n')\n with open(name_to,mode) as out_file:\n for i in lines:\n words = i.split()\n for j in reversed(words):\n out_file.write(j+' ')\n out_file.write('\\n')\n if unit=='line':\n lines = self.contents.split('\\n')\n with open(name_to,mode) as out_file:\n for i in reversed(lines):\n out_file.write(i+'\\n')\n\n def transpose(self,name_to,mode='w'):\n if mode!='w' and mode !='a':\n raise ValueError(\"Invalid mode. Must be 'w' or 'a'\")\n # assume equal number of words on each line of the input file\n lines = self.contents.split('\\n')\n line_num = len(lines)\n\n words_per_line = len(lines[0].split())\n tot_words = self.contents.split()\n\n with open(name_to,mode) as out_file:\n for i in range(words_per_line):\n for j in range(line_num):\n out_file.write(tot_words[j*words_per_line+i]+\" \")\n out_file.write('\\n')\n\n def __str__(self):\n char = len(self.contents)\n alph = 0\n num = 0\n white = 0\n for i in range(len(self.contents)):\n if self.contents[i].isalpha():\n alph = alph+1\n elif self.contents[i].isdigit():\n num = num+1\n elif self.contents[i].isspace():\n white = white+1\n source = \"Source file: \" + self.name + \"\\n\"\n tot_char = \"Total characters: \" + str(char) + \"\\n\"\n alph_char = \"Alphabetic characters: \" + str(alph) + \"\\n\"\n num_char = \"Numerical characters: \" + str(num) + \"\\n\"\n white_char = \"Whitespace characters: \" + str(white) + \"\\n\"\n lines = \"Number of lines: \" + str(len(self.contents.split('\\n')))\n return source+tot_char+alph_char+num_char+white_char+lines\n\nfile = ContentFilter(\"hello.txt\")\nprint(file)\nfile.uniform(\"hello_up.txt\")\nfile.uniform(\"hello_low.txt\",'w','lower')\nfile.reverse(\"hello_line.txt\")\nfile.reverse(\"hello_word.txt\",'w','word')\nfile.transpose(\"hello_trans.txt\")\nfile2 = ContentFilter(12)\n","sub_path":"ProbSets/Comp/Week 1/rzhang_exceptions.py","file_name":"rzhang_exceptions.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"241565284","text":"import cv2\nimport numpy as np\nimport dlib\n\ndef extract_index_nparray(nparray):\n\tindex=None\n\tfor num in nparray[0]:\n\t\tindex=num\n\t\tbreak\n\n\treturn 
index\n\n\n\nimg=cv2.imread(\"/home/chiranjeev/Desktop/face_swapping/bradely.jpeg\")\nimg_gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nmask=np.zeros_like(img_gray)\ndetector=dlib.get_frontal_face_detector()\npredictor=dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\nfaces=detector(img_gray)\n\nfor face in faces:\n\tlandmarks=predictor(img_gray,face)\n\tlandmarks_points=[]\n\n\tfor n in range(0,68):\n\t\tx=landmarks.part(n).x\n\t\ty=landmarks.part(n).y\n\t\tlandmarks_points.append((x,y))\n\n\t\t# cv2.circle(img,(x,y),3,(0,0,255),1)\n\n\tpoints=np.array(landmarks_points,np.int32)\n\tconvexhull=cv2.convexHull(points)\n\n\t# cv2.polylines(img,[convexhull],True,(255,0,0),3)\n\tcv2.fillConvexPoly(mask,convexhull,255)\n\n\tface_image1=cv2.bitwise_and(img,img,mask=mask)\n\n\t#Delaunay traingulation\n\trect=cv2.boundingRect(convexhull)\n\tsubdiv=cv2.Subdiv2D(rect)\n\tsubdiv.insert(landmarks_points)\n\ttraingles=subdiv.getTriangleList()\n\ttraingles=np.array(traingles,dtype=np.int32)\n\n\tindexes_triangles=[]\n\n\tfor t in traingles:\n\t\tpt1=(t[0],t[1])\n\t\tpt2=(t[2],t[3])\n\t\tpt3=(t[4],t[5])\n\n\t\t##pt1\n\n\t\t#print(\"pt1=\\n\",pt1)\n\t\tindex_pt1=np.where((points==pt1).all(axis=1))\n\t\tindex_pt1=extract_index_nparray(index_pt1)\n\t\t#print(\"index_pt1\\n\",index_pt1)\n\n\t\t##pt2\n\n\t\t#print(\"pt2=\\n\",pt2)\n\t\tindex_pt2=np.where((points==pt2).all(axis=1))\n\t\tindex_pt2=extract_index_nparray(index_pt2)\n\t\t#print(\"index_pt2\\n\",index_pt2)\n\t\n\t\t##pt3\n\n\t\t#print(\"pt3=\\n\",pt3)\n\t\tindex_pt3=np.where((points==pt3).all(axis=1))\n\t\tindex_pt3=extract_index_nparray(index_pt3)\n\t\t#print(\"index_pt3\\n\",index_pt3)\n\n\t\tif index_pt1 is not None and index_pt2 is not None and index_pt3 is not None:\n\t\t\ttriangle=[index_pt1,index_pt2,index_pt3]\n\t\t\tindexes_triangles.append(triangle) \n\n\n\t\t# cv2.circle(img,pt1,3,(0,255,0),-1)\n\t\t# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/created_image_marked_pt1_of_triangles.jpg\",img)\n\t\n\t\t# cv2.circle(img,pt2,3,(15,29,130),2)\n\t\t# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/created_image_marked_pt2_of_triangles.jpg\",img)\n\n\t\t# cv2.circle(img,pt3,3,(139,55,10),2)\n\t\t# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/created_image_marked_pt3_of_triangles.jpg\",img)\n\n\n\t\t# cv2.line(img,pt1,pt2,(0,0,255),1)\n\t\t# cv2.line(img,pt2,pt3,(0,0,255),1)\n\t\t# cv2.line(img,pt3,pt1,(0,0,255),1)\n\t\n\t##################################\n\t#printing indexes\n\t# print(indexes_triangles)\n\t##################################\n\n\t# cv2.imshow(\"created_image\",img)\n\t# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/created_image.jpg\",img)\n\t#cv2.imshow(\"face_image\",face_image1)\n\t#cv2.imshow(\"mask\",mask)\n\n########FACE-2################\n\nimg2=cv2.imread(\"/home/chiranjeev/Desktop/face_swapping/faces2.jpeg\")\nimg2_gray=cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)\n\nfaces2=detector(img2_gray)\n\nfor face in faces2:\n\tlandmarks2=predictor(img2_gray,face)\n\tlandmarks_points2=[]\n\n\tfor n in range(0,68):\n\t\tx=landmarks2.part(n).x\n\t\ty=landmarks2.part(n).y\n\t\tlandmarks_points2.append((x,y))\n\n\n\t\t#cv2.circle(img2,(x,y),3,(0,255,0),-1)\n\n\t#drawing triangle on face2 same as 
face1\n\n\t#cv2.imshow(\"created_image2\",img2)\n\t#cv2.imwrite(\"created_image2.jpg\",img2)\n\n\tpoints2=np.array(landmarks_points2,np.int32)\n\tconvexhull2=cv2.convexHull(points2)\n\nlines_space_mask=np.zeros_like(img_gray)\nlines_space_new_face=np.zeros_like(img2)\n\nimg2_new_face=np.zeros_like(img2,np.uint8)\n\nfor triangle_index in indexes_triangles:\n\n\t#########first face#########\n\n\ttr1_pt1=landmarks_points[triangle_index[0]]\n\ttr1_pt2=landmarks_points[triangle_index[1]]\n\ttr1_pt3=landmarks_points[triangle_index[2]]\n\ttraingle1=np.array([tr1_pt1,tr1_pt2,tr1_pt3],np.int32)\n\trect1=cv2.boundingRect(traingle1)\n\t(x1,y1,w1,h1)=rect1\n\t# cv2.rectangle(img,(x1,y1),(x1+w1,y1+h1),(0,255,0),1)\n\tcropped_triangle1=img[y1:y1+h1,x1:x1+w1]\n\n\tcropped_tr1_mask=np.zeros((h1,w1),np.uint8)\n\tpoints1=np.array([[tr1_pt1[0]-x1,tr1_pt1[1]-y1],\n\t\t\t\t\t[tr1_pt2[0]-x1,tr1_pt2[1]-y1],\n\t\t\t\t\t[tr1_pt3[0]-x1,tr1_pt3[1]-y1]],np.int32)\n\n\tcv2.fillConvexPoly(cropped_tr1_mask,points1,255)\n\tcropped_triangle1=cv2.bitwise_and(cropped_triangle1,cropped_triangle1,mask=cropped_tr1_mask)\n\n\t#linespace\n\n\t# cv2.line(lines_space_mask,tr1_pt1,tr1_pt2,255)\n\t# cv2.line(lines_space_mask,tr1_pt2,tr1_pt3,255)\n\t# cv2.line(lines_space_mask,tr1_pt1,tr1_pt3,255)\n\tlines_sapce=cv2.bitwise_and(img,img,mask=lines_space_mask)\n\t#########second face#########\n\n\ttr2_pt1=landmarks_points2[triangle_index[0]]\n\ttr2_pt2=landmarks_points2[triangle_index[1]]\n\ttr2_pt3=landmarks_points2[triangle_index[2]]\n\ttriangle2=np.array([tr2_pt1,tr2_pt2,tr2_pt3],np.int32)\n\trect2=cv2.boundingRect(triangle2)\n\t(x2,y2,w2,h2)=rect2\n\t# cv2.rectangle(img2,(x2,y2),(x2+w2,y2+h2),(0,255,0,1))\n\tcropped_triangle2=img2[y2:y2+h2,x2:x2+w2]\n\t\n\tcropped_tr2_mask=np.zeros((h2,w2),np.uint8)\n\tpoints2=np.array([[tr2_pt1[0]-x2,tr2_pt1[1]-y2],\n\t\t\t\t\t[tr2_pt2[0]-x2,tr2_pt2[1]-y2],\n\t\t\t\t\t[tr2_pt3[0]-x2,tr2_pt3[1]-y2]],np.int32)\n\n\tcv2.fillConvexPoly(cropped_tr2_mask,points2,255)\n\t\n\n\t# cv2.line(img2,tr2_pt1,tr2_pt2,(0,0,255),2)\n\t# cv2.line(img2,tr2_pt2,tr2_pt3,(0,0,255),2)\n\t# cv2.line(img2,tr2_pt3,tr2_pt1,(0,0,255),2)\n\n\t#WARP TRAINGLES\n\tpoints1=np.float32(points1)\n\tpoints2=np.float32(points2)\n\t#it will tell how much to swap these two triangles\n\tM=cv2.getAffineTransform(points1,points2)\n\t#print(M)\n\n\t#warping triangle1 into triangle2\n\twarped_triangle=cv2.warpAffine(cropped_triangle1,M,(w2,h2))\n\twarped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=cropped_tr2_mask)\n\t#break\n\n\t#Reconstruct destination face\n\timg2_new_face_rect_area=img2_new_face[y2:y2+h2,x2:x2+w2]\n\t\n\timg2_new_face_gray=cv2.cvtColor(img2_new_face_rect_area,cv2.COLOR_BGR2GRAY)\n\t_,background_mask=cv2.threshold(img2_new_face_gray,1,255,cv2.THRESH_BINARY_INV) #to put face\n\tbackground=cv2.bitwise_and(warped_triangle,warped_triangle,mask=background_mask)\n\timg2_new_face_rect_area = cv2.add(img2_new_face_rect_area, 
background)\n\timg2_new_face[y2:y2+h2,x2:x2+w2]=img2_new_face_rect_area\n\n#face_swapped\t\n\nimg2_face_mask=np.zeros_like(img2_gray)\nimg2_head_mask=cv2.fillConvexPoly(img2_face_mask,convexhull2,255)\nimg2_face_mask=cv2.bitwise_not(img2_head_mask)\n\nimg2_head_noface=cv2.bitwise_and(img2,img2,mask=img2_face_mask)\n\nresult=cv2.add(img2_head_noface,img2_new_face)\n\n\n(x,y,w,h)=cv2.boundingRect(convexhull2)\ncenter_face2=(int((x+x+w)/2),int((y+y+h)/2))\nseamlessclone=cv2.seamlessClone(result,img2,img2_head_mask,center_face2,cv2.MIXED_CLONE)\n\ncv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/seamlessclone_result.jpg\",seamlessclone)\n\ncv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/final_swapping_result.jpg\",result)\n\n\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/background.jpg\",background)\n\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/img2_new_face_triangle_area.jpg\",img2_new_face)\n\n\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/wrapped_triangle.jpg\",warped_triangle)\n\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/cropped_tr1_mask.jpg\",cropped_tr1_mask)\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/cropped_tr2_mask.jpg\",cropped_tr2_mask)\n\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/cropped_tr1_seperated_triangle1_mask.jpg\",cropped_triangle1)\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/cropped_tr2_seperated_triangle2_mask.jpg\",cropped_triangle2)\n\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/cropped_single_triangle_on_img.jpg\",cropped_triangle1)\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/cropped_single_triangle_on_img2.jpg\",cropped_triangle2)\n\n\n#cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/single_triangle_on_img.jpg\",img)\n#cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/single_triangle_on__img2.jpg\",img2)\n\n# cv2.imshow(\"same_pts_on_img2_as_img1\",img2)\n# cv2.imwrite(\"/home/chiranjeev/Desktop/face_swapping/same_pts_on_img2_as_img1.jpg\",img2)","sub_path":"face_swapping.py","file_name":"face_swapping.py","file_ext":"py","file_size_in_byte":7876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"547590384","text":"import time\nimport os\nimport pathlib\nimport datetime\n\nfrom fastapi.logger import logger\n\nimport dependency\nfrom fastapi import FastAPI, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom starlette import status\nfrom starlette.responses import JSONResponse\n\nfrom dependency import CredentialException, pool\nfrom routers.auth import auth_router\nfrom routers.prediction import model_router\nfrom routers.training import training_router\n\n\n# App instance used by the server \napp = FastAPI()\n\n# --------------------------------------------------------------------------\n# | Router Registration |\n# |---------------------|\n# In order for groups of routes to work with the server, they must be added\n# below here with a specific router. Routers act as an \"app instance\" that\n# can be used from outside of the main.py file. 
The specific code for each\n# router can be found in the routers/ folder.\n#\n# --------------------------------------------------------------------------\n\napp.include_router(\n auth_router,\n prefix=\"/auth\",\n tags=[\"auth\"],\n responses={404: {\"detail\": \"Not found\"}},\n)\n\napp.include_router(\n model_router,\n prefix=\"/model\",\n tags=[\"models\"],\n responses={404: {\"detail\": \"Not found\"}},\n)\n\napp.include_router(\n training_router,\n prefix=\"/training\",\n tags=[\"training\"],\n responses={404: {\"detail\": \"Not found\"}},\n)\n\n\n@app.exception_handler(CredentialException)\nasync def credential_exception_handler(request: Request, exc: CredentialException):\n \"\"\"\n Handler for credential exception. This type of exception is raised when a client attempts to access an endpoint\n without sufficient permissions for endpoints that are protected by OAuth2. This exception is raised if the client\n has no bearer token, if the bearer token is expired, or if their account does not have sufficient permissions/roles\n to access a certain endpoint.\n\n :param request: HTTP Request object\n :param exc: Exception\n :return: 401 HTTP Exception with authentication failure message\n \"\"\"\n return JSONResponse(\n status_code=status.HTTP_401_UNAUTHORIZED,\n content={\n \"status\": 'failure',\n \"detail\": \"Unable to validate credentials.\"\n },\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n\n\n# -------------------------------\n# Web Server Configuration\n# -------------------------------\n\n# Cross Origin Request Scripting (CORS) is handled here.\norigins = [\n \"http://localhost\",\n \"http://localhost:3000\",\n \"http://localhost:5057\",\n \"http://localhost:5000\",\n \"http://localhost:6005\",\n \"http://localhost:6379\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n# -------------------------------\n# Basic Routes\n# -------------------------------\n\n\n@app.get(\"/\")\nasync def root():\n \"\"\"\n Root endpoint that validates the server is running. 
This requires no authentication to call, and will always\n return the same result so long as the server is running.\n :return: {'status': 'success'} if server is running, else no HTTP response.\n \"\"\"\n return {\n \"status\": \"success\",\n \"detail\": 'PhotoAnalysisServer is Running'\n }\n\n\ndef delete_unused_files():\n \"\"\"\n Scheduled thread that will check all uploaded images every hour and delete them if they\n have not been accessed recently.\n \"\"\"\n\n current_time = datetime.timedelta(hours=-4) + datetime.datetime.now()\n\n for file_name in os.listdir('./prediction_images/'):\n\n file_creation_time = datetime.datetime.fromtimestamp(\n pathlib.Path('./prediction_images/' + file_name).stat().st_ctime\n )\n\n time_since_file_creation = current_time - file_creation_time\n\n if time_since_file_creation.days >= 1:\n os.remove('./prediction_images/' + file_name)\n logger.debug('[Automated Deletion Thread] Removed Image File [' + file_name + ']')\n\n\n # Delay for an hour between deletion checks\n for _ in range(60*60): \n if not dependency.shutdown: # Check between increments to stop hanging on shutdown\n time.sleep(1) \n else:\n break\n\n if dependency.shutdown:\n logger.debug('Image Deletion Thread Terminated')\n\n\n\n@app.on_event('startup')\ndef on_startup():\n \"\"\"\n On server startup, schedule\n \"\"\" \n\n pool.submit(delete_unused_files) \n\n\n\n@app.on_event('shutdown') \ndef on_shutdown():\n \"\"\"\n On server shutdown, stop all background model pinging threads.\n \"\"\"\n dependency.shutdown = True\n pool.shutdown(wait=True)\n","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"575558384","text":"import json\nimport csv\nimport os\nfrom sys import exit\n\ndef Print(node):\n for key, value in node.items():\n if key != 'children':\n print(key)\n print(value)\n if len(node['children']) > 0: \n for s in node['children']:\n Print(s) \n\ndef Work(base, node, name, num):\n base['file']=[]\n base['file'].append(name)\n for key, value in node.items():\n if key != 'children':\n base[key]=[]\n base[key].append(value)\n if len(node['children']) > 0:\n counter = 0\n base['children']=[]\n for s in node['children']:\n base['children'].append(counter)\n base['children'][counter]={}\n Work(base['children'][counter], s, name, num)\n counter +=1\n else:\n base['children']=[]\n \ndef Add(base, node, name2):\n base['file'].append(name2)\n for key, value in node.items():\n if key != 'children':\n base[key].append(value)\n if len(node['children']) > 0:\n counter = 0\n for s in node['children']:\n Add(base['children'][counter], s, name2)\n counter +=1 \n\n\ndef ExtractTaxids(base, list1):\n if base['reads'][0]>0:\n list1.append(base['taxid'][0])\n for s in base['children']:\n ExtractTaxids(s, list1)\n\ndef addlabel(base):\n base['pathogenic']=False\n base['doid']=[]\n base['disease']=[]\n base['symptom']={}\n for s in base['children']:\n addlabel(s)\n \ndef highlightPathogen(base, list1, list2):\n for key in list1:\n for kee in key:\n if (str(base['taxid'][0])==kee[10:]):\n base['pathogenic']=True\n for idx,item in enumerate(list1[0][str(kee)]):\n base['disease'].append(item['DOID_label'])\n base['doid'].append(item['DOID'])\n base['symptom'][str(item['DOID'])]=[]\n for sym in list2:\n for sympt in sym:\n if(str(item['DOID'])==str(sympt)):\n print(item['DOID'])\n print(list2[0][str(sympt)]['HP_label'])\n 
base['symptom'][str(sympt)].append(list2[0][str(sympt)]['HP_label'])\n    for s in base['children']:\n        highlightPathogen(s, list1, list2)\n\n# [str(kee['DOID'])]\n# def addsymptoms(base, symptoms, counter):\n#     for key in symptoms:\n#         for kee in key:\n#             if(str(kee['DOID'])==str(base['doid'][counter])):\n#                 base[str(kee['DOID'])].append(str(kee[\"HP_label\"]))\n\ncount = 0\ncounter = 0\ndata = {}\ntaxlist = []\ndirectory = 'C:/Users/garyk/Documents/python_code/pathogen-dashboard/jsons/'\noutdir = 'C:/Users/garyk/Documents/python_code/pathogen-dashboard/'\npathogenlist = []\nsymptoms = []\n\nfor filename in os.listdir(directory):\n    with open(directory+filename) as json_data:\n        d = json.load(json_data)\n        name = (count+1)\n        if (count == 0):\n            Work(data, d, name, count)\n        else:\n            Add(data, d, name)\n        count = count+1\n\nExtractTaxids(data, taxlist)\naddlabel(data)\n\n# csv.writer needs a text-mode file in Python 3 (this was opened as 'wb')\nwith open(outdir + 'taxlist.csv', 'w', newline='') as csvfile:\n    writer = csv.writer(csvfile, delimiter=' ',\n                        quotechar='|', quoting=csv.QUOTE_MINIMAL)\n    writer.writerow(taxlist)\n\n#### following lines use the pathogen and symptom list generated by querying the database\n\nwith open('C:/Users/garyk/Documents/python_code/pathogen-dashboard/database2/metagenomic_data_db_info_2.json') as f:\n    for line in f:\n        pathogenlist.append(json.loads(line))\n\nwith open('C:/Users/garyk/Documents/python_code/pathogen-dashboard/database2/disease_to_symptoms.json') as f:\n    for line in f:\n        symptoms.append(json.loads(line))\n\nhighlightPathogen(data, pathogenlist, symptoms)\n# addsymptoms(data,symptoms,counter)\n\nwith open(outdir + 'dashboard/app/json_output/' + 'data.json', 'w') as outfile:\n    json.dump(data, outfile)","sub_path":"merge_jsons.py","file_name":"merge_jsons.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"595864383","text":"\n# install tcl/tk=8.6.8,\n# error after installing tk (sudo apt install -y python-tk, sudo apt install -y python3-tk):\n# conflict: This probably means that tk wasn't installed properly.\n\n# solution to this:\n# gedit /usr/local/lib/tk8.6/tk.tcl (line 14)\n# change the version in (package require -exact Tk ...) on that line to the installed 8.6.8\n\n#\n# http://liulab.csrc.ac.cn/dokuwiki/doku.php (sastax)\n#\n\n# error (import turicreate in python):\n# ImportError: libblas.so.3: cannot open shared object file\n# solution:\n# sudo apt-get install libatlas-base-dev\n# https://isis.astrogeology.usgs.gov/IsisSupport/index.php?topic=3614.0\n#\n\n# how to use wordcount in python:\n# https://stackoverflow.com/questions/19674336/how-to-write-a-wordcount-program-using-python-without-using-map-reduce\n\n# UBUNTU NVIDIA VERSION:\n# lspci|grep -i vga\n#\n\n# install opencv2 on ubuntu (import cv2 in python):\n# pip install opencv-python\n# run the WeChat TiaoYiTiao (\"Jump Jump\") game from ubuntu (please install adb, fastboot):\n# sudo apt -y install android-tools-adb android-tools-fastboot\n# PLEASE RESET TO DEFAULT IN THE DEVELOPER-MODE SETTINGS (using adb to connect android to computer)\n# deal with \"no permissions\" from adb devices:\n# sudo adb kill-server\n# sudo adb start-server\n# adb devices\n# python webchat_jump_auto.py:\n# https://github.com/Prinsphield/Wechat_AutoJump.git\n\n\nimport Tkinter\ntop = Tkinter.Tk()\nimport commands\n\ndef call_back():\n    print(\"hello\")\n    # print(commands.getoutput('ls'))\n    print(commands.getoutput('echo \"this is
good!!!\"'))\n\n\ntkk=Tkinter.Button(top,text='hello,world',command=call_back,height=11,width=13)\ntkk.pack()\nTkinter.mainloop()\n","sub_path":"un_ln/tkpython/tkplot/tk_command.py","file_name":"tk_command.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"179494875","text":"#!/usr/bin/env python\r\n# Run with:\r\n# > gdb --batch -q -x locate_struct.py\r\n\r\n# Author: Fabio Pagani \r\n# Author: Davide Balzarotti \r\n# Creation Date: 12-09-2016\r\n\r\nimport traceback\r\nimport socket\r\nimport cProfile\r\nimport logging\r\nimport time\r\nimport gdb\r\nimport sys\r\nimport os\r\n\r\nsys.path.append(\"./\")\r\nfrom mytypes import Sample, Struct, Field, Node\r\nfrom explorer import Explorer\r\nfrom loader import Loader\r\nfrom qemu_gdb import *\r\nfrom worklist import *\r\nfrom utils import *\r\n\r\nSNAME = str(SNAME)\r\nKDIR = str(KDIR)\r\nQEMU_PORT = 2222\r\nGDB_PORT = 1234\r\n\r\ndef fixup_field(s, f):\r\n # The size of kmem_cache is not the one reported in the DWARF\r\n # symbols: the array 'node' does not contain MAX_NUMNODES (as\r\n # specified in the definition) but rather nr_node_ids elements\r\n # (free_kmem_cache_nodes).\r\n if s.ty == \"struct kmem_cache\" and f.name == \"node\":\r\n nr_node_ids = int(gdb.parse_and_eval(\"nr_node_ids\"))\r\n current_size = len(f.array_elements)\r\n f.ty = f.ty.replace(str(current_size), str(nr_node_ids))\r\n f.array_elements = f.array_elements[:nr_node_ids]\r\n s.size -= current_size * 8\r\n s.size += nr_node_ids * 8\r\n logging.debug(\"Fixed '%s' in:\\n%s\" % (f.name, s))\r\n\r\n if s.ty == \"struct e820_table\" and f.name == \"entries\":\r\n nr_entries = s[\"nr_entries\"].value\r\n entries = f.array_elements\r\n f.array_elements = entries[:nr_entries]\r\n e820_entry_size = entries[1] - entries[0]\r\n s.size = e820_entry_size * nr_entries\r\n logging.debug(\"Fixed '%s' in:\\n%s\" % (f.name, s)) \r\n\r\n \r\ndef fixup_struct(s):\r\n if s.ty == \"struct task_struct\":\r\n s.size = int(gdb.parse_and_eval(\"arch_task_struct_size\"))\r\n\r\n if s.ty == \"struct thread_struct\" or s.ty == \"struct fpu\":\r\n s.size -= (int(gdb.parse_and_eval(\"init_task\").type.sizeof) -\r\n int(gdb.parse_and_eval(\"arch_task_struct_size\")))\r\n \r\ndef walk_field(worklist, explorer, s, f, struct, field, field_name):\r\n to_explore = []\r\n if is_ptr_of_ptr_field(s, f) and f.is_deref():\r\n field = cast_ptr_of_ptr(s, f, struct, field)\r\n f.value = gdb_value_to_int(field)\r\n f.set_ptr_array_of_ptr()\r\n\r\n if f.is_array_of_struct() or f.is_array_of_struct_ptr() or f.is_ptr_array_of_ptr():\r\n for i, (name, v) in enumerate(walk_array(field_name, field)):\r\n if is_struct_pointer(v.type):\r\n f.add_array_element(v)\r\n else:\r\n f.add_array_element(v.address)\r\n to_explore.append((v, i))\r\n\r\n worklist.append(name, v)\r\n\r\n if is_percpu_field(s, f):\r\n f.set_percpu()\r\n \r\n if f.value == 0:\r\n return []\r\n\r\n for offset, name, v in explorer.handle_percpu_field(field, field_name):\r\n f.add_array_element(v)\r\n worklist.append(name, v)\r\n to_explore.append((v, -1))\r\n # Here we keep only the last one..\r\n f.value = offset\r\n\r\n return to_explore\r\n\r\ndef walk_struct(w, worklist, sample, explorer):\r\n struct_name, struct, global_root = w\r\n\r\n s = Struct(struct.address,\r\n struct.type,\r\n struct_name,\r\n global_root)\r\n \r\n fixup_struct(s) \r\n \r\n valid = is_valid_struct(struct)\r\n logging.debug(\"Walking struct '%s' '%s' (size: %d)... 
@ 0x%016x (valid: %s) %s\" %\r\n (s.ty, s.name, s.size, s.addr, valid, \"GLOBAL\" if s.global_root else \"\"))\r\n\r\n \r\n if not valid:\r\n logging.debug('%s' % struct)\r\n return\r\n\r\n for field_name, field in deep_items_anon(struct): # Loop on the fields of the struct\r\n if is_type_size_zero(field.type):\r\n logging.warning(\"Zero size for field: %s %s\" % (field.type, field_name))\r\n continue\r\n\r\n f = s.addField(field_name, field)\r\n\r\n appended = worklist.append(field_name, field)\r\n\r\n to_explore = [(field, -1)]\r\n to_explore += walk_field(worklist, explorer, s, f, struct, field, field_name)\r\n\r\n try:\r\n fixup_field(s, f)\r\n except gdb.error:\r\n logging.warning(\"Exception while fixing '%s' in:\\n%s\" % (f.name, s))\r\n\r\n logging.debug(f)\r\n\r\n if not appended and len(to_explore) == 1:\r\n continue\r\n\r\n for (tf, array_index) in to_explore:\r\n works = explorer.handle(s.ty, f.name, tf, array_index)\r\n for name, v in works:\r\n worklist.append(name, v)\r\n\r\n sample.dump_struct(s)\r\n\r\n\r\ndef explore_global_percpu(explorer, worklist, addr, sym, name):\r\n # was_ptr is needed because we don't model array of pointers of\r\n # pointers (es: current_task). We miss a step of derefs, but\r\n # the __per_cpu_offset is stable so it should not affect the\r\n # analysis.\r\n was_ptr = False\r\n if is_struct(sym.type):\r\n sym = sym.cast(sym.type.pointer())\r\n else:\r\n was_ptr = True\r\n\r\n s = Struct(addr, sym.type, name, global_container=True)\r\n sym_array_ptr = sym.type.array(0, NR_CPUS-1).pointer()\r\n field_value = gdb.Value(addr).cast(sym_array_ptr).dereference()\r\n f = s.addField(name, field_value)\r\n s.size = 8*(NR_CPUS)\r\n\r\n for offset, name, v in explorer.handle_percpu_field(sym, name):\r\n worklist.append(name, v)\r\n if was_ptr:\r\n v = v.dereference()\r\n f.add_array_element(v, check=False)\r\n \r\n return s\r\n \r\ndef explore_global_percpus(sample, explorer, worklist, global_percpus):\r\n\r\n addr = 0xffffffff82000000\r\n sorted_percpus = sorted(global_percpus.items(), key=lambda x:x[0])\r\n\r\n for (filename, name), sym in sorted_percpus:\r\n logging.debug(\"Loading GLOBAL_PERCPU: %s %s\" % (filename, name))\r\n s = explore_global_percpu(explorer, worklist, addr, sym, name) \r\n sample.dump_struct(s)\r\n logging.debug(s)\r\n addr += 8*(NR_CPUS)\r\n\r\n\r\ndef do_analysis(worklist, sample, explorer):\r\n for i, work in enumerate(worklist.worklist):\r\n walk_struct(work, worklist, sample, explorer)\r\n\r\n if i % 50000 == 0:\r\n tot = len(worklist.worklist)\r\n sys.stdout.write(\"processed: %d total: %d left: %d\\n\" % (i, tot,\r\n tot - i))\r\n sys.stdout.flush()\r\n \r\ndef explore_sample():\r\n exp_result = \"../explorations/%s\" % (SNAME)\r\n print(\"[+] Exploration result in %s\" % exp_result)\r\n sample = Sample(exp_result)\r\n L = Loader(KDIR)\r\n\r\n worklist = L.WORKLIST\r\n global_structs_addr = set([gdb_value_to_int(v.address) for (_, v, _) in worklist.worklist])\r\n explorer = Explorer(L.NODE_INFO, L.POINTER_INFO, global_structs_addr)\r\n global_heads = L.GLOBAL_HEADS\r\n global_percpus = L.PERCPU_GLOBALS\r\n\r\n for s in L.GLOBAL_CONTAINERS:\r\n sample.dump_struct(s)\r\n\r\n explore_global_percpus(sample, explorer, worklist, global_percpus)\r\n\r\n for i in global_heads:\r\n struct_type, field_name = global_heads[i]\r\n for name, v in explorer.handle_global_head(i, struct_type, field_name):\r\n worklist.append(name, v)\r\n\r\n print(\"[+] Ready to start the exploration\")\r\n do_analysis(worklist, sample, explorer)\r\n 
logging.info(\"[+] We found %d structs\" % sample.counter)\r\n return\r\n\r\n\r\ndef create_dir(d):\r\n if not os.path.exists(d):\r\n os.makedirs(d)\r\n \r\ndef main():\r\n print(\"[+] Target kernel %s\" % KDIR)\r\n \r\n create_dir(\"../logs\")\r\n create_dir(\"../explorations/\")\r\n \r\n log_file = \"../logs/%s\" % (SNAME)\r\n print(\"[+] Logging in %s\" % log_file)\r\n logging.basicConfig(format='%(levelname)s : %(message)s',\r\n stream=open(log_file, \"w\"),\r\n level=logging.DEBUG)\r\n\r\n logging.debug(\"gdb_port = %d qemu_port = %d\" % (GDB_PORT, QEMU_PORT))\r\n\r\n gdb.execute('add-symbol-file %s/vmlinux 0' % KDIR, to_string=True)\r\n gdb.execute('set architecture i386:x86-64', to_string=True)\r\n gdb.execute('set max-value-size unlimited', to_string=True)\r\n gdb.execute('maint set symbol-cache-size 4096')\r\n connect_gdb_remote(GDB_PORT)\r\n\r\n connect_qemu_monitor('localhost', QEMU_PORT)\r\n send_qemu_monitor(b'stop')\r\n send_qemu_monitor('loadvm %s' % SNAME)\r\n\r\n load_executable_sections(KDIR)\r\n\r\n print('\\n------ Analyzing %s ------' % SNAME)\r\n start = time.time()\r\n explore_sample()\r\n print(\"Exploration took: %.2fs\" % (time.time() - start))\r\n\r\n gdb.execute('disconnect')\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n main()\r\n except Exception as err:\r\n print(traceback.print_exc())\r\n gdb.execute('disconnect')\r\n\r\n\r\n # cProfile.run('main()', filename=\"/tmp/prof%d\" % SID, sort=1)\r\n\r\n # sym = gdb.lookup_symbol(\"pid_hash\")[0]\r\n # print(is_valid_struct(a.value()))\r\n # sys.exit(-1)\r\n # t = gdb.lookup_type(\"struct mm_slot\")\r\n # print(find_offset(t, \"mm_node\", array_index=-1))\r\n # v = gdb.Value(0x2345234523424).cast(t)\r\n # sys.exit(-1)\r\n","sub_path":"src/locate_struct.py","file_name":"locate_struct.py","file_ext":"py","file_size_in_byte":9063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"640299263","text":"# -*- coding: utf-8 -*-\n\n\nimport os\n\n\ntry:\n from django.urls import path\n HAS_PATH = True\nexcept ImportError:\n HAS_PATH = False\n\n\ntry:\n from django.urls import re_path\n HAS_RE_PATH = True\nexcept ImportError:\n HAS_RE_PATH = False\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nSECRET_KEY = 'test'\n\n\nROOT_URLCONF = 'tests.urls'\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join('BASE_DIR' , 'test.sqlite3'),\n }\n}\n\n\nINSTALLED_APPS = [\n 'tests',\n]\n\n\n# eof\n","sub_path":"tests/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"288345391","text":"import requests\nfrom flask import json\nfrom bs4 import BeautifulSoup\nfrom urllib import parse\nfrom flask import Flask\nfrom flask_restful import Resource, Api, reqparse\n\napp = Flask(__name__)\napi = Api(app)\n\n\nclass SteamSearch(Resource):\n def put(self):\n \n parser = reqparse.RequestParser()\n parser.add_argument('query', required=True,\n help='A search term needs to be provided')\n args = parser.parse_args()\n\n formattedSearchTerm = parse.urlencode({'query': args.query})\n page=1\n while page<=3:\n r = requests.get(\n f\"'https://www.walmart.com/search/?page='+str(page)+'&ps=40&{formattedSearchTerm}'\")\n \n \n \n results = []\n # just get the code, no headers or anything\n plain_text = r.text.encode('ascii', 'replace')\n # BeautifulSoup objects can be sorted through easy\n soup = 
BeautifulSoup(plain_text,'html.parser')\n for link in soup.findAll('a', {'class': 'product-title-link line-clamp line-clamp-2'}):\n href =[]\n href.append(\"https://www.walmart.com\"+link.get('href'))\n print(href)\n \n \n for url in href:\n \n source_code = requests.get(url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text,\"lxml\")\n for item_name in soup.findAll('h1', {'class': 'prod-ProductTitle font-normal'}):\n title=item_name.string\n for brand_name in soup.findAll('a',{'class':'prod-brandName'}):\n brand=brand_name.string\n for ratings in soup.findAll('span',{'itemprop':'ratingValue'}):\n rating=ratings.string\n for p1 in soup.findAll('span',{'class':'price-currency'}):\n p11=p1.string\n for p2 in soup.findAll('span',{'class':'price-characteristic'}):\n p22=p2.string\n for p3 in soup.findAll('span',{'class':'price-mark'}):\n p33=p3.string\n for p4 in soup.findAll('span',{'class':'price-mantissa'}):\n \n p44=p4.string\n price=p11+p22+p33+p44\n print(title,price,brand,rating) \n results.append({'title':title,\n 'brand':brand,\n 'rating':rating,\n 'price':price})\n return results\n page+=1\n \n \n \n \n \n \n\n\napi.add_resource(SteamSearch, '/query')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"apii/Trial_flask _2nd.py","file_name":"Trial_flask _2nd.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"587390858","text":"\nnamefile = input('Enter a file name: ')\nfileopen = open(namefile)\n\nemail = dict()\nfor line in fileopen:\n words = line.split()\n if not line.startswith('From: ') : continue\n else:\n for word in words:\n if '@' in word:\n email[word] = email.get(word,0) + 1\n\nbigword = None\nbigcount = None\nfor k,v in email.items():\n if bigcount is None or v > bigcount:\n bigword = k\n bigcount = v\n\nprint(bigword, bigcount)","sub_path":"OnlineClasses/Programming for Everybody/Class2/ex9_4.py","file_name":"ex9_4.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"135644659","text":"from typing import List, Tuple\n\nimport torch\n\nfrom RL import Episode\nfrom .general import to_list, to_list_float_fixed, DataTypesSize, to_int\n\n\ndef get_episode_flat(state_size, action_size):\n transition_size = 2 * state_size + action_size + 1\n\n def transformer(data, start, _):\n episode_length = to_int(data, start)\n start += DataTypesSize.Int\n num_floats = episode_length * transition_size\n value, bytes_read = to_list_float_fixed(data, num_floats, start)\n return value, bytes_read + DataTypesSize.Int\n\n return transformer\n\n\ndef episode_to_tensors(episode: List[float], state_size, action_size, device):\n transition_size = 2 * state_size + action_size + 1\n\n def data_slice(size, stride, i):\n return slice(transition_size * i + stride, transition_size * i + size + stride)\n\n length_range = range(int(len(episode) / transition_size))\n stride = 0\n\n states = [episode[data_slice(state_size, stride, i)] for i in length_range]\n stride += state_size\n\n actions = [episode[data_slice(action_size, stride, i)] for i in length_range]\n stride += action_size\n\n rewards = [episode[data_slice(1, stride, i)] for i in length_range]\n stride += 1\n\n next_states = [episode[data_slice(state_size, stride, i)] for i in length_range]\n\n states = torch.tensor(states, dtype=torch.float32, device=device)\n next_states = torch.tensor(next_states, dtype=torch.float32, 
device=device)\n actions = torch.tensor(actions, dtype=torch.float32, device=device).long()\n rewards = torch.tensor(rewards, dtype=torch.float32, device=device)\n\n return states, actions, rewards, next_states\n\n\ndef to_training_data(training_data_bytes: bytes, start_index: int, state_size: int, action_size: int, device='cuda') -> \\\n Tuple[List[Episode], int]:\n training_data, bytes_read = to_list(training_data_bytes, get_episode_flat(state_size, action_size), start_index)\n training_data = [episode_to_tensors(episode, state_size, action_size, device) for episode in training_data]\n return training_data, bytes_read\n","sub_path":"python/src/serialization/training_data_serialization.py","file_name":"training_data_serialization.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"97670822","text":"from engine import human_report_time\nimport json\nimport os\nimport sys\n\nheader_datetime = \"id,datetime,num_bikes_available,num_bikes_disabled,num_docks_available,num_docks_disabled,is_installed,is_renting,is_returning\"\nheader_epoch = \"id,epoch,num_bikes_available,num_bikes_disabled,num_docks_available,num_docks_disabled,is_installed,is_renting,is_returning\"\n\n\ndef print_file(file, is_epoch):\n id = int(file.split(\".\")[0])\n with open(file, \"r\") as fp:\n for line in fp:\n row = json.loads(line)\n time = row[\"epoch\"]\n data = row[\"data\"]\n if not is_epoch:\n time = human_report_time(time)\n data[\"id\"] = id\n data[\"time\"] = time\n print(\"{id},{time},{num_bikes_available},{num_bikes_disabled},{num_docks_available},{num_docks_disabled},{is_installed},{is_renting},{is_returning}\".format(**data))\n\n\ndef print_all_files(files, is_epoch):\n is_print_all = True\n if len(files) > 0:\n is_print_all = False\n\n if is_epoch:\n print(header_epoch)\n else:\n print(header_datetime)\n\n for file in os.listdir('.'):\n if os.path.isfile(file) and file.endswith(\".log\"):\n if is_print_all or file in files:\n print_file(file, is_epoch)\n\n\nif __name__ == \"__main__\":\n files = []\n is_epoch = False\n\n for i, argv in enumerate(sys.argv):\n if i > 0:\n if argv == \"-e\":\n is_epoch = True\n else:\n files.append(argv)\n\n print_all_files(files, is_epoch)\n","sub_path":"log_reader.py","file_name":"log_reader.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"59804380","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Rootuple\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 10000\n\noutputname = 'gen.root'\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n\"file:/uscms/home/asanchez/nobackup/csa14/MINIAOD/6A81C41C-0606-E411-A991-20CF3027A633.root\"\n )\n)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(outputname),\n closeFileFast = cms.untracked.bool(True)\n)\n\nprocess.rootuple = cms.EDAnalyzer('MiniAODRootupleChicGen')\n\nprocess.p = cms.Path(process.rootuple)\n","sub_path":"Ponia/RootupleChib/test/testMiniAODRootuple_cfg.py","file_name":"testMiniAODRootuple_cfg.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
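The `episode_to_tensors` helper in the training_data_serialization.py record above slices one flat float buffer per episode, laid out as `[state | action | reward | next_state]` for every transition. A minimal sketch of the same decoding under only that layout assumption (the function name and the toy sizes below are illustrative, not from the repo):

```python
import torch

def decode_episode(flat, state_size, action_size, device="cpu"):
    # One transition occupies state + action + reward + next_state floats.
    stride = 2 * state_size + action_size + 1
    rows = torch.tensor(flat, dtype=torch.float32,
                        device=device).view(-1, stride)
    states = rows[:, :state_size]
    actions = rows[:, state_size:state_size + action_size].long()
    rewards = rows[:, state_size + action_size]          # shape (n,)
    next_states = rows[:, state_size + action_size + 1:]
    return states, actions, rewards, next_states

# Two transitions with state_size=2, action_size=1 (toy numbers).
s, a, r, ns = decode_episode(
    [0., 0., 1., 0.5, 1., 1.,
     1., 1., 0., 1.0, 0., 0.], state_size=2, action_size=1)
```

Reshaping once with `view` and slicing columns gives the same four tensors as the per-transition Python loop in the record, without building intermediate lists.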
+{"seq_id":"11573529","text":"#-*- coding:utf-8 -*-\r\n\r\nfrom flask import request, render_template, session, redirect, url_for, flash\r\nfrom flask.ext.sqlalchemy import SQLAlchemy\r\nfrom datetime import timedelta\r\nimport requests\r\nfrom itsdangerous import URLSafeSerializer\r\n\r\nfrom __init__ import app\r\n\r\ndb= SQLAlchemy(app)\r\n\t\r\nfrom models import *\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n\tpost_query=Post2.query.order_by('id desc').all()\r\n\treturn render_template('index.html',post_query=post_query)\r\n\r\n@app.route('/write')\r\ndef write():\r\n\tif 'logged_in' in session:\r\n\t\tif session['logged_in']==True:\r\n\t\t\treturn render_template('write.html')\r\n\t\telse:\r\n\t\t\treturn redirect(url_for('login'))\r\n\telse:\r\n\t\treturn redirect(url_for('index'))\r\n\r\n@app.route('/write/check',methods=['POST'])\r\ndef write_check():\r\n\tpost_title=request.form['title']\r\n\tpost_body=request.form['text']\r\n\tuser_query=User2.query.filter(User2.username==session['username']).first()\r\n\tp=Post2(user_query.id,post_title,post_body)\r\n\tdb.session.add(p)\r\n\tdb.session.commit()\r\n\treturn redirect(url_for('index'))\r\n\r\n\r\n@app.route('/logout')\r\ndef logout():\r\n\tsession['logged_in']=False\r\n\tsession.pop('username',None)\r\n\treturn redirect(url_for('index'))\r\n\r\n@app.route('/login')\r\ndef login():\r\n\tif 'logged_in' in session:\r\n\t\tif session['logged_in']:\r\n\t\t\treturn redirect(url_for('index'))\r\n\t\telse:\r\n\t\t\treturn render_template('login.html')\r\n\telse:\r\n\t\treturn render_template('login.html')\r\n\r\n@app.route('/login/check',methods=['POST'])\r\ndef login_check():\r\n\tusername=request.form['username']\r\n\tpassword=request.form['password']\r\n\tuser_query=User2.query.filter(User2.username==username).first()\r\n\tif user_query:\r\n\t\tif user_query.check_password_hash(password):\r\n\t\t\tif user_query.is_active==True:\r\n\t\t\t\tsession['logged_in']=True\r\n\t\t\t\tsession['username']=username\r\n\t\t\t\tif user_query.is_admin:\r\n\t\t\t\t\tsession['is_admin']=True\r\n\t\t\t\telse:\r\n\t\t\t\t\tsession['is_admin']=False\r\n\t\t\telse:\r\n\t\t\t\treturn u'메일 인증 먼저 하세요'\r\n\t\t\treturn redirect(url_for('index'))\r\n\t\telse:\r\n\t\t\treturn 'password wrong'\r\n\telse:\r\n\t\treturn 'id wrong'\r\n\r\n@app.route('/signup')\r\ndef signup():\r\n\treturn render_template('signup.html')\r\n\r\n@app.route('/signup/check',methods=['POST'])\r\ndef signup_check():\r\n\tusername=request.form['username']\r\n\tpassword=request.form['password']\r\n\temail=request.form['email']\r\n\tuser2=User2(username,password,email)\r\n\tdb.session.add(user2)\r\n\tdb.session.commit()\r\n\tsend_simple_message(username=username,email=email)\r\n\treturn redirect(url_for('index'))\r\n\r\n\r\n@app.route('/doublecheck')\r\ndef doublecheck():\r\n\tusername=request.args.get('username')\r\n\tname=User2.query.filter(User2.username==username).first()\r\n\tif name:\r\n\t\treturn '0'\r\n\telse:\r\n\t\treturn '1'\r\n\r\n@app.route('/admin')\r\ndef admin_page():\r\n\tif 'is_admin' in session and session['is_admin']:\r\n\t\treturn 'Admin'\r\n\telse:\r\n\t\treturn 'User'\r\n\r\n\r\n\r\n@app.route('/activate/')\r\ndef activate(hash_value):\r\n\ts=URLSafeSerializer(app.config.get('SECRET_KEY'))\r\n\ta=s.loads(hash_value)\r\n\tuser_query=User2.query.filter(User2.email==a).first()\r\n\tuser_query.is_active=True\r\n\tdb.session.add(user_query)\r\n\tdb.session.commit()\r\n\treturn u'인증이 완료되었습니다'\r\n\r\n@app.before_request\r\ndef 
make_session_timeout():\r\n\tsession.permanent=True\r\n\tapp.permanent_session_lifetime=timedelta(minutes=5)\r\n\r\ndef send_simple_message(username,email):\r\n\ts=URLSafeSerializer(app.config.get('SECRET_KEY'))\r\n\thash_value=s.dumps(email)\r\n\treturn requests.post(\r\n\t\t\"https://api.mailgun.net/v2/sandbox79e52ac751eb4923ba69abb7fa180171.mailgun.org/messages\",\r\n\t\tauth=(\"api\", \"key-c6b7a97cf8c37a902f6ebd8e85edfa65\"),\r\n\t\tdata={\"from\": \"no-reply \",\r\n\t\t\tu\"to\": username+u\"<\"+email+u\">\",\r\n\t\t\t\"subject\": \"Hello\",\r\n\t\t\t\"text\": \"http://54.64.200.183:5000/activate/\"+hash_value})\r\n\r\n\r\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"196589668","text":"#!/usr/bin/env python\nimport sys, os, re, json, csv, ckanapi\nfrom pipe.gadgets import get_package_parameter\n#import util\n#from . import util\n#import loaders, schema, pipeline\n\nfrom datetime import datetime\n\nsys.path.insert(0, '/Users/drw/WPRDC/etl-dev/wprdc-etl') # A path that we need to import code from\nsys.path.insert(0, '/home/sds25/wprdc-etl') # A path that we need to import code from\nimport pipeline as pl\n\n#sys.path.insert(0, '/Users/drw/WPRDC/etl-dev/wprdc-etl/pipeline') \nfrom marshmallow import fields, post_load, pre_load\nfrom collections import OrderedDict, defaultdict\nfrom pprint import pprint\n\n\nDEFAULT_CKAN_INSTANCE = 'https://data.wprdc.org'\n\ndef convert_none_to(x,new_value):\n if x is None:\n return new_value\n return x\n\nclass BaseTransactionsSchema(pl.BaseSchema):\n zone = fields.String()\n start = fields.DateTime()\n end = fields.DateTime()\n utc_start = fields.DateTime()\n\n class Meta:\n ordered = True\n\nclass TransactionsSchema(BaseTransactionsSchema):\n transactions = fields.Integer()\n payments = fields.Float()\n\n @pre_load\n def cast_fields(self,data):\n data['payments'] = float(data['payments'])\n # This may not be necessary, but ensuring that datetimes are in\n # ISO format is the best way of preparing timestamps to be\n # sent to CKAN.\n data['start'] = datetime.strptime(data['start'],\"%Y-%m-%d %H:%M:%S\").isoformat()\n data['end'] = datetime.strptime(data['end'],\"%Y-%m-%d %H:%M:%S\").isoformat()\n data['utc_start'] = datetime.strptime(data['utc_start'],\"%Y-%m-%d %H:%M:%S\").isoformat()\n\nclass SplitTransactionsSchema(BaseTransactionsSchema):\n \"\"\"The split transactions schema handles the case where transactions are to be split between\n mobile transactions and meter transactions.\"\"\"\n meter_transactions = fields.Integer()\n meter_payments = fields.Float()\n mobile_transactions = fields.Integer()\n mobile_payments = fields.Float()\n\n @pre_load\n def cast_fields(self,data):\n # If there are zero meter payments in a time slot when there are some\n # mobile payments, convert the None values for meter-payment parameters\n # to appropriately typed zeros.\n data['meter_payments'] = float(convert_none_to(data['meter_payments'],0.0))\n data['mobile_payments'] = float(convert_none_to(data['mobile_payments'],0.0))\n data['meter_transactions'] = convert_none_to(data['meter_transactions'],0)\n data['mobile_transactions'] = convert_none_to(data['mobile_transactions'],0)\n # This may not be necessary, but ensuring that datetimes are in\n # ISO format is the best way of preparing timestamps to be\n # sent to CKAN.\n data['start'] = datetime.strptime(data['start'],\"%Y-%m-%d %H:%M:%S\").isoformat()\n 
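The `@pre_load` hooks running here, and in the schemas that follow, normalise every timestamp to ISO-8601 before loading, since (as the record's comments note) that is the safest form to send to CKAN's datastore. A minimal self-contained sketch of the pattern, written against marshmallow 3, where hooks take `**kwargs` and return the data; the record itself follows the older marshmallow 2 convention of mutating in place:

```python
from datetime import datetime
from marshmallow import Schema, fields, pre_load

class SlotSchema(Schema):
    start = fields.DateTime()

    @pre_load
    def cast_fields(self, data, **kwargs):
        # Parse "YYYY-MM-DD HH:MM:SS" and re-emit as ISO-8601.
        data['start'] = datetime.strptime(
            data['start'], "%Y-%m-%d %H:%M:%S").isoformat()
        return data

print(SlotSchema().load({'start': '2020-01-01 12:00:00'}))
```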
data['end'] = datetime.strptime(data['end'],\"%Y-%m-%d %H:%M:%S\").isoformat()\n data['utc_start'] = datetime.strptime(data['utc_start'],\"%Y-%m-%d %H:%M:%S\").isoformat()\n\nclass SamplingTransactionsSchema(TransactionsSchema):\n parent_zone = fields.String()\n\nclass SplitSamplingTransactionsSchema(SplitTransactionsSchema):\n parent_zone = fields.String()\n\nclass OccupancySchema(pl.BaseSchema):\n zone = fields.String()\n start = fields.DateTime()\n end = fields.DateTime()\n utc_start = fields.DateTime()\n transactions = fields.Integer()\n car_minutes = fields.Integer()\n payments = fields.Float()\n durations = fields.Dict() # [ ] Verify that the deployed version of wprdc-etl can handle such Dict/JSON fields.\n inferred_occupancy = fields.Integer()\n\n class Meta:\n ordered = True\n\n @pre_load\n def cast_fields(self,data):\n if data['durations'] is None:\n data['durations'] = '{}'\n data['durations'] = json.loads(data['durations'])\n\n if data['payments'] is None:\n data['payments'] = 0.0\n data['payments'] = float(data['payments'])\n\n if data['car_minutes'] is None:\n data['car_minutes'] = 0\n # This may not be necessary, but ensuring that datetimes are in\n # ISO format is the best way of preparing timestamps to be\n # sent to CKAN.\n data['start'] = datetime.strptime(data['start'],\"%Y-%m-%d %H:%M:%S\").isoformat()\n data['end'] = datetime.strptime(data['end'],\"%Y-%m-%d %H:%M:%S\").isoformat()\n data['utc_start'] = datetime.strptime(data['utc_start'],\"%Y-%m-%d %H:%M:%S\").isoformat()\n\n# @pre_load\n# def just_print_out_the_data(self,data):\n# pprint(data)\n# print(\"ParkingSchema.pre_load: type of data = {}\".format(type(data)))\n\n #@pre_load\n #def process_na_zone(self, data):\n # zone = data.get('zone')\n # if zone.lower() in ['n/a', 'osc']:\n # data['zone'] = None\n # return data\n\n #@post_load\n #def combine_date_and_time(self, in_data):\n # in_data['arrest_datetime'] = (datetime(\n # in_data['arrest_date'].year, in_data['arrest_date'].month,\n # in_data['arrest_date'].day, in_data['arrest_time'].hour,\n # in_data['arrest_time'].minute, in_data['arrest_time'].second\n # ))\n\ndef write_to_csv(filename,list_of_dicts,keys):\n with open(filename, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys, extrasaction='ignore', lineterminator='\\n')\n dict_writer.writeheader()\n dict_writer.writerows(list_of_dicts)\n\ndef get_package_parameter(site,package_id,parameter=None,API_key=None):\n \"\"\"Gets a CKAN package parameter. 
If no parameter is specified, all metadata\n for that package is returned.\"\"\"\n try:\n ckan = ckanapi.RemoteCKAN(site, apikey=API_key)\n metadata = ckan.action.package_show(id=package_id)\n if parameter is None:\n return metadata\n else:\n return metadata[parameter]\n except:\n raise RuntimeError(\"Unable to obtain package parameter '{}' for package with ID {}\".format(parameter,package_id))\n\ndef find_resource_id(site,package_id,resource_name,API_key=None):\n#def get_resource_id_by_resource_name():\n # Get the resource ID given the package ID and resource name.\n resources = get_package_parameter(site,package_id,'resources',API_key)\n for r in resources:\n if r['name'] == resource_name:\n return r['id']\n return None\n\ndef get_connection_parameters(server, settings_file_path):\n with open(settings_file_path) as f:\n settings = json.load(f)\n site = settings['loader'][server]['ckan_root_url']\n package_id = settings['loader'][server]['package_id']\n API_key = settings['loader'][server]['ckan_api_key']\n return settings, site, package_id, API_key \n\ndef send_data_to_pipeline(server,settings_file_path,resource_name,schema,list_of_dicts,primary_keys,chunk_size=5000):\n # Taken from github.com/WPRDC/stop-in-the-name-of-data.\n\n if resource_name is not None:\n specify_resource_by_name = True\n else:\n specify_resource_by_name = False\n if specify_resource_by_name:\n kwargs = {'resource_name': resource_name}\n #else:\n #kwargs = {'resource_id': ''}\n\n # Synthesize virtual file to send to the FileConnector\n from tempfile import NamedTemporaryFile\n ntf = NamedTemporaryFile()\n\n # Save the file path\n target = ntf.name\n fields_to_publish = schema().serialize_to_ckan_fields() # These are field names and types together\n print(\"fields_to_publish = {}\".format(fields_to_publish))\n field_names = [f['id'] for f in fields_to_publish]\n write_to_csv(target,list_of_dicts,field_names)\n\n # Testing temporary named file:\n #ntf.seek(0)\n #with open(target,'r') as g:\n # print(g.read())\n\n ntf.seek(0)\n # Code below stolen from prime_ckan/*/open_a_channel() but really from utility_belt/gadgets\n #with open(os.path.dirname(os.path.abspath(__file__))+'/ckan_settings.json') as f: # The path of this file needs to be specified.\n\n settings, site, package_id, API_key = get_connection_parameters(server, settings_file_path)\n\n update_method = 'upsert'\n if len(primary_keys) == 0:\n update_method = 'insert'\n\n clear_first = False\n if update_method == 'insert':\n # If the datastore already exists, we need to delete it.\n # We can do this through a CKAN API call (if we know\n # the resource ID) or by setting clear_first = True\n # on the pipeline.\n \n # However, the ETL framework fails if you try to \n # use clear_first = True when the resource doesn't\n # exist, so check that it exists.\n resource_exists = (find_resource_id(site,package_id,kwargs['resource_name'],API_key) is not None)\n if resource_exists:\n clear_first = True\n\n print(\"Preparing to pipe data from {} to resource {} package ID {} on {}, using the update method {} with clear_first = {}\".format(target,list(kwargs.values())[0],package_id,site,update_method,clear_first))\n\n super_pipeline = pl.Pipeline('parking_pipeline',\n 'Pipeline for Parking Data',\n log_status=False,\n settings_file=settings_file_path,\n settings_from_file=True,\n #start_from_chunk=0, # Unsupported by /home/sds25/wprdc-etl/ version of pipeline.\n chunk_size=chunk_size\n ) \\\n .connect(pl.FileConnector, target, encoding='utf-8') \\\n .extract(pl.CSVExtractor, 
firstline_headers=True) \\\n .schema(schema) \\\n .load(pl.CKANDatastoreLoader, server,\n clear_first=clear_first,\n fields=fields_to_publish,\n #package_id=package_id,\n #resource_id=resource_id,\n #resource_name=resource_name,\n key_fields=primary_keys,\n method=update_method,\n **kwargs)\n\n pipe_output = super_pipeline.run()\n\n package_name = get_package_parameter(site,package_id,'title',API_key)\n\n log = open('uploaded.log', 'w+')\n\n if specify_resource_by_name:\n print(\"Data successfully piped to {}/{}.\".format(package_name,resource_name))\n success = True\n log.write(\"Finished upserting {} at {} \\n\".format(kwargs['resource_name'],datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n else:\n print(\"Data successfully piped to {}/{}.\".format(package_name,kwargs['resource_id']))\n success = True\n log.write(\"Finished upserting {} at {} \\n\".format(kwargs['resource_id'],datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n log.close()\n ntf.close()\n assert not os.path.exists(target)\n\n resource_id = find_resource_id(site,package_id,kwargs['resource_name'],API_key)\n\n return success\n\n\ndef main():\n upload_in_chunks = True\n server = \"testbed\"\n resource_id = sys.argv[1]\n filename = None\n if len(sys.argv) > 2:\n filename = sys.argv[2] # Name of the file that contains the data to be uploaded.\n #upload_file_to_CKAN(resource_id,filename) # This functionality would best be reproduced\n #by calling the existing wprdc-etl pipeline library.\n\n############\n\nif __name__ == '__main__':\n main()\n","sub_path":"pipe/pipe_to_CKAN_resource.py","file_name":"pipe_to_CKAN_resource.py","file_ext":"py","file_size_in_byte":11245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"236063536","text":"\nfrom flask.ext.sqlalchemy import SQLAlchemy\ndb = SQLAlchemy()\n\nclass Note(db.Model):\n __tablename__ = 'notes'\n\n id = db.Column(db.Integer, primary_key=True)\n note = db.Column(db.String())\n created = db.Column(db.DateTime()) \n authorId = db.Column(db.Integer(), db.ForeignKey('users.id'))\n \n def __init__(self, note, created, authorId):\n self.note = note\n self.created = created\n self.authorId = authorId\n\n def __repr__(self):\n return ''.format(self.id)\n\nclass User(db.Model):\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key = True)\n openid = db.Column(db.String(), index = True)\n name = db.Column(db.String())\n email = db.Column(db.String())\n notes = db.relationship('Note', backref='notes.id', lazy='dynamic')\n\n def __init__(self, name, email, openid):\n self.name = name\n self.email = email\n self.openid = openid\n\n def __repr__(self):\n return ''.format(self.openid)\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"550764350","text":"# This package will contain the spiders of your Scrapy project\n#\n# Please refer to the documentation for information on how to create and manage\n# your spiders.\n\nfrom scrapy.spider import BaseSpider\n\n#from scrapy import signals\n#from scrapy.xlib.pydispatch import dispatcher\n\n\nclass RRBaseSpider(BaseSpider):\n\n def __init__(self, *args, **kwargs):\n super(RRBaseSpider, self).__init__(*args, **kwargs)\n #dispatcher.connect(self.spider_closed, signals.spider_closed)\n #dispatcher.connect(self.spider_opened, signals.spider_opened)\n\n self.item_from = kwargs.get(\"item_from\", 0)\n self.item_to = 
kwargs.get(\"item_to\", 100000)\n","sub_path":"scrapy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"304021900","text":"# Write a function named \"add_time\" that can add a duration to a start time and return the result.\n\ndef add_time(start, duration, day=None):\n\n modifiers_later = 0\n days_later = 0\n\n days_of_week = [\n \"Sunday\",\n \"Monday\",\n \"Tuesday\",\n \"Wednesday\",\n \"Thursday\",\n \"Friday\",\n \"Saturday\"\n ]\n\n modifier = start.split(\" \")[1]\n initial_modifier = modifier\n\n start = start.split(\" \")\n start.pop(1)\n start = ''.join(start)\n\n hour = int(start.split(\":\")[0]) + int(duration.split(\":\")[0])\n minute = int(start.split(\":\")[1]) + int(duration.split(\":\")[1])\n\n if minute > 59:\n minute -= 60\n hour += 1\n\n hour_modifier = hour\n\n while hour > 12:\n hour -= 12\n\n while hour_modifier > 11:\n hour_modifier -= 12\n modifier = \"PM\" if modifier == \"AM\" else \"AM\"\n modifiers_later += 1\n\n if modifiers_later % 2 != 0:\n if initial_modifier == \"PM\":\n modifiers_later += 1\n else:\n modifiers_later -= 1\n\n days_later = modifiers_later/2\n\n new_time = f\"{hour}:{str(minute).zfill(2)} {modifier}\"\n\n if day:\n weekday = days_of_week.index(day.title())\n weekday_new = int((weekday + days_later) % 7)\n new_time += f\", {days_of_week[weekday_new]}\"\n\n if days_later == 1:\n new_time += \" (next day)\"\n\n if days_later > 1:\n new_time += f\" ({int(days_later)} days later)\"\n\n return new_time\n\n\n\nprint(add_time(\"3:00 PM\", \"3:10\"))\n# # Returns: 6:10 PM\n#\nprint(add_time(\"11:30 AM\", \"2:32\", \"Monday\"))\n# # Returns: 2:02 PM, Monday\n#\nprint(add_time(\"11:43 AM\", \"00:20\"))\n# # Returns: 12:03 PM\n#\nprint(add_time(\"10:10 PM\", \"3:30\"))\n# # Returns: 1:40 AM (next day)\n#\nadd_time(\"11:43 PM\", \"24:20\", \"tueSday\")\n# # Returns: 12:03 AM, Thursday (2 days later)\n","sub_path":"add_time function.py","file_name":"add_time function.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"633588620","text":"import subprocess\nfrom mosaic.utilities.resource_path import resource_path\n\ntry:\n\t__version__=subprocess.check_output(['git', 'describe', '--abbrev=0', '--tags'], stderr=subprocess.STDOUT).strip().lstrip('v')\nexcept:\n\t__version__=\"\"\n\ntry:\n\tif not __version__:\n\t\twith open( resource_path('version-hash'), 'r' ) as f:\n\t\t\t__version__=f.read().strip()\t\t\nexcept:\n\t__version__=\"\"\n\ntry:\n\t__build__=subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], stderr=subprocess.STDOUT).strip()\nexcept:\n\t__build__=\"\"\n\ntry:\n\tif not __build__:\n\t\twith open( resource_path('commit-hash'), 'r' ) as f:\n\t\t\t__build__=f.read().strip()\nexcept:\n\t__build__=\"\"\n","sub_path":"mosaic/_version.py","file_name":"_version.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"388629310","text":"from catboost import CatBoostRegressor\nimport pandas as pd\n\n\ndef apartments_predict(walls_material, floor_number, floors_total, total_area,\n kitchen_area, distance, azimuth) -> float:\n model = CatBoostRegressor()\n model.load_model('apartments_model')\n data = {\n 'wallsMaterial': [walls_material],\n 'floorNumber': [floor_number],\n 'floorsTotal': [floors_total],\n 'totalArea': 
[total_area],\n 'kitchenArea': [kitchen_area],\n 'distance': [distance],\n 'azimuth': [azimuth]\n }\n df = pd.DataFrame(data)\n return model.predict(df)[0]\n","sub_path":"apartments.py","file_name":"apartments.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"543288036","text":"import os\nimport pygame\nimport operator\nfrom choose_menu import ChooseMenu\nfrom settings import IMAGE_PATH, FPS, WIN_WIDTH, WIN_HEIGHT, RECORD_PATH, COLOR_INACTIVE, COLOR_ACTIVE, game_status, user_info, SOUND_PATH, music\nfrom button import Buttons\nfrom color_settings import *\n\narial=pygame.font.match_font('arial')\nFONT_BIG = pygame.font.Font(arial, 50)\nFONT_SMALL = pygame.font.Font(arial, 35)\n\n\ndef show_player(which: int) -> list:\n '''Select which records txt file to read'''\n\n which_file = ['records.txt', 'records2.txt', 'records3.txt']\n last_records = ''\n # absolute path!\n with open(os.path.join(RECORD_PATH, which_file[which]), 'rt') as file:\n records = file.readlines()\n for line in records:\n last_records += f'{line}'\n\n last_records = last_records.split('\\n')\n last_records.pop(-1)\n last_records_list = []\n for i in last_records:\n name, score = i.split('--')\n last_records_list.append((name, float(score)))\n last_records_list = sorted(\n last_records_list, key=operator.itemgetter(1)) # sorted scores\n person_to_show = []\n for line in range(len(last_records_list)):\n each = f'{line+1}. {last_records_list[line]}s'\n new_each = ''\n for i in each:\n if i not in (\"(\", \")\", \"'\"):\n new_each += i\n person_to_show.append(new_each)\n return person_to_show\n\n\ndef draw_text(screen, text, x, y, size: int):\n '''surface / text / coordinates / font size'''\n FONT = pygame.font.Font(arial, size)\n txt_surface = FONT.render(str(text), True, WHITE)\n screen.blit(txt_surface, (x, y))\n\n\nclass InputBox:\n\n def __init__(self):\n self.rect = pygame.Rect((WIN_WIDTH/2)-100, (WIN_HEIGHT/3)-30, 140, 40)\n self.color = COLOR_INACTIVE\n self.text = '' # user's name\n self.txt_surface = FONT_BIG.render(self.text, True, self.color)\n self.active = False\n\n def handle_event(self, event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n # If the user clicked on the input_box rect.\n if self.rect.collidepoint(event.pos):\n # Toggle the active variable.\n self.active = not self.active\n else:\n self.active = False\n # Change the current color of the input box.\n self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE\n if event.type == pygame.KEYDOWN:\n if self.active:\n if event.key == pygame.K_RETURN:\n print(f'player: {self.text}')\n user_info['user_name'] = self.text\n\n c = ChooseMenu()\n c.run()\n game_status['go_input_window'] = True\n\n elif event.key == pygame.K_BACKSPACE:\n self.text = self.text[:-1]\n else:\n self.text += event.unicode\n # Re-render the text.\n self.txt_surface = FONT_SMALL.render(\n self.text, True, self.color)\n\n def update(self):\n # Resize the box if the text is too long.\n width = max(200, self.txt_surface.get_width()+10)\n self.rect.w = width\n\n def draw(self, screen):\n # Blit the text.\n screen.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5))\n # Blit the rect.\n pygame.draw.rect(screen, self.color, self.rect, 2)\n\n def get_text(self):\n return self.text\n\n\nclass Input_window:\n def __init__(self, screen):\n self.screen = screen\n\n self.bg = pygame.transform.scale(pygame.image.load(os.path.join(\n IMAGE_PATH, \"level_background.png\")), (WIN_WIDTH, WIN_HEIGHT))\n self.back_image = pygame.transform.scale(\n 
pygame.image.load(os.path.join(IMAGE_PATH, \"back.png\")), (80, 80))\n self.clock = pygame.time.Clock()\n self.input_box1 = InputBox()\n self.input_boxes = [self.input_box1]\n self.intro_text_1 = 'Input Name To Save Record'\n self.intro_text_2 = 'Hit `ENTER` When Ready'\n self.intro_text_3 = 'Highest scores:'\n self.intro_text_4 = 'Level 1'\n self.intro_text_5 = 'Level 2'\n self.intro_text_6 = 'Level 3'\n self.back_btn = Buttons(5, 5, 80, 80)\n self.buttons = [self.back_btn]\n\n def back_or_not(self, x, y):\n if self.back_btn.clicked(x, y):\n pygame.mixer.music.stop()\n self.play_music()\n if music[\"mute\"]:\n pygame.mixer.music.pause()\n return True\n return False\n\n def play_music(self):\n pygame.mixer.music.load(os.path.join(SOUND_PATH, \"menu1.mp3\"))\n pygame.mixer.music.set_volume(0.3)\n pygame.mixer.music.play(-1)\n\n def run(self):\n while game_status[\"run\"] and not game_status[\"go_start_menu\"]:\n game_status[\"go_input_window\"] = False\n x, y = pygame.mouse.get_pos()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_status[\"run\"] = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.back_or_not(x, y):\n game_status[\"go_start_menu\"] = True\n\n for box in self.input_boxes:\n box.handle_event(event)\n\n for box in self.input_boxes:\n box.update()\n\n self.screen.blit(self.bg, (0, 0))\n self.screen.blit(self.back_image, (5, 5))\n for bt in self.buttons:\n x, y = pygame.mouse.get_pos()\n bt.create_frame(x, y)\n bt.draw_frame(self.screen)\n draw_text(self.screen, self.intro_text_1,\n WIN_WIDTH/2 - 220, (WIN_HEIGHT/3)-150, size=50)\n draw_text(self.screen, self.intro_text_2,\n WIN_WIDTH/2 - 200, (WIN_HEIGHT/3)-100, size=50)\n draw_text(self.screen, self.intro_text_3,\n WIN_WIDTH/2 - 130, (WIN_HEIGHT/2)-70, size=50)\n\n # level 1\n draw_text(self.screen, self.intro_text_4,\n WIN_WIDTH/4 - 190, WIN_HEIGHT/2, size=50)\n for i in range(len(show_player(0))):\n draw_text(\n self.screen, show_player(0)[i], WIN_WIDTH/4 - 190, WIN_HEIGHT/2+50+(i*50), 30)\n\n # level 2\n draw_text(self.screen, self.intro_text_5,\n WIN_WIDTH/3 + 70, WIN_HEIGHT/2, size=50)\n for i in range(len(show_player(1))):\n draw_text(\n self.screen, show_player(1)[i], WIN_WIDTH/3 + 70, WIN_HEIGHT/2+50+(i*50), 30)\n\n # level 3\n draw_text(self.screen, self.intro_text_6,\n WIN_WIDTH/2 + 210, WIN_HEIGHT/2, size=50)\n for i in range(len(show_player(2))):\n draw_text(\n self.screen, show_player(2)[i], WIN_WIDTH/2 + 210, WIN_HEIGHT/2+50+(i*50), 30)\n\n for box in self.input_boxes:\n box.draw(self.screen)\n\n for box in self.input_boxes:\n box.update()\n\n pygame.display.update()\n self.clock.tick(FPS)","sub_path":"user_record/user_record.py","file_name":"user_record.py","file_ext":"py","file_size_in_byte":7178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"246672383","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport csv\nfrom datetime import datetime\nfrom collections import namedtuple, defaultdict\nfrom matplotlib import pyplot\n\n\nLogLine = namedtuple(\"LogLine\", [\"timestamp\", \"op_code\", \"value\"])\n\n\ndef parse_time(time_string):\n return datetime.strptime(time_string, \"%H:%M:%S.%f\")\n\n\nwith open(\"co2datalog.csv\", \"r\", newline=\"\") as csvfile:\n data_rows = csv.reader(csvfile)\n\n def transform_csv_data(row):\n return LogLine(timestamp=parse_time(row[0]),\n op_code=int(row[1]),\n value=int(row[2]))\n\n log_data = list(map(transform_csv_data, 
data_rows))\n\nprint(log_data[0])\n\ndef sort_by_op_code(log_data):\n table = defaultdict(list)\n for x in log_data:\n table[x.op_code].append(x)\n\n return table\n\nlog_table = sort_by_op_code(log_data)\n\nprint(log_table.keys())\n\ndef plot_log_table(log_table):\n pyplot.figure()\n for op_code_table in log_table.values():\n x_data = list(map(lambda x: x.timestamp, op_code_table))\n y_data = list(map(lambda x: x.value, op_code_table))\n pyplot.plot(x_data, y_data, '+',\n label=\"{0:x}\".format(op_code_table[0].op_code))\n\n pyplot.legend()\n \ndef split_log_table(keys, log_table):\n positive_table = {}\n negative_table = {}\n\n for k, v in log_table.items():\n if k in keys:\n positive_table[k] = v\n else:\n negative_table[k] = v\n\n return positive_table, negative_table\n\nco2_table, other_table = split_log_table([0x71, 0x50], log_table)\n\nwiggling_table, other_table = split_log_table([0x6e, 0x4f], other_table)\n\n# plot_log_table(log_table)\n\n# plot_log_table(co2_table)\nplot_log_table(wiggling_table)\nplot_log_table(other_table)\n\n\npyplot.show()\n","sub_path":"data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"607197756","text":"from tensorflow.keras.models import load_model\nfrom tensorflow.keras.applications.inception_v3 import preprocess_input\nimport numpy as np\nfrom utils import load_image_from_path,plot_images_with_class\nimport pandas as pd\nfrom breeds.breeds import dogs_breeds\nfrom tqdm import tqdm\nimport os\n\nmodel=load_model(os.path.join('models','dogs_classifier.h5'))\nmodel.summary()\n\ntesting=pd.read_csv(os.path.join(\"data\",\"test.csv\"))\ntotal=len(testing)\n\ncorrect=0\nwrong=0\nwrong_entries=[]\n\nfor image,breed in tqdm(testing.values):\n\tdog_image=load_image_from_path(image,target_size=(299,299))\n\tdog_image=np.expand_dims(dog_image, axis=0)\n\tprocessed_image=preprocess_input(dog_image)\n\tprediction=model.predict(processed_image)\n\tindex=np.argmax(prediction[0])\n\tlabel=dogs_breeds[index]\n\tif label==breed:\n\t\tcorrect+=1\n\telse:\n\t\twrong+=1\n\t\twrong_entries.append((image,breed,label))\n\nprint(\"Total Testing Data= \",total)\nprint(\"Total Correct Predictions= \",correct)\nprint(\"Total Wrong Predictions= \",wrong)\nprint(\"Accuracy %= \",(correct/total)*100)\nprint(\"Wrong %= \",(wrong/total)*100)\n\ndog_images=[load_image_from_path(dogs[0]) for dogs in wrong_entries]\ntrue_cls=[cls_true[1] for cls_true in wrong_entries]\npred_cls=[cls_pred[2] for cls_pred in wrong_entries]\nplot_images_with_class(dog_images,cls_true=true_cls, cls_pred=pred_cls)\n","sub_path":"Classifier Training/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"481548719","text":"import argparse\nimport re\nimport string\nimport time\nimport torch\nimport torch.nn as nn\n\nimport data\nimport model\n\nparser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')\nparser.add_argument('--data', type=str, default='data/penn/',\n help='location of the data corpus')\nparser.add_argument('--model', type=str, default='LSTM',\n help='type of recurrent net (LSTM, QRNN, GRU)')\nparser.add_argument('--emsize', type=int, default=400,\n help='size of word embeddings')\nparser.add_argument('--nhid', type=int, default=1150,\n help='number of hidden units per 
layer')\nparser.add_argument('--nlayers', type=int, default=3,\n help='number of layers')\nparser.add_argument('--lr', type=float, default=30,\n help='initial learning rate')\nparser.add_argument('--clip', type=float, default=0.25,\n help='gradient clipping')\nparser.add_argument('--epochs', type=int, default=8000,\n help='upper epoch limit')\nparser.add_argument('--batch_size', type=int, default=80, metavar='N',\n help='batch size')\nparser.add_argument('--bptt', type=int, default=70,\n help='sequence length')\nparser.add_argument('--dropout', type=float, default=0.4,\n help='dropout applied to layers (0 = no dropout)')\nparser.add_argument('--dropouth', type=float, default=0.3,\n help='dropout for rnn layers (0 = no dropout)')\nparser.add_argument('--dropouti', type=float, default=0.65,\n help='dropout for input embedding layers (0 = no dropout)')\nparser.add_argument('--dropoute', type=float, default=0.1,\n help='dropout to remove words from embedding layer (0 = no dropout)')\nparser.add_argument('--wdrop', type=float, default=0.5,\n help='amount of weight dropout to apply to the RNN hidden to hidden matrix')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed')\nparser.add_argument('--nonmono', type=int, default=5,\n help='random seed')\nparser.add_argument('--cuda', action='store_false',\n help='use CUDA')\nparser.add_argument('--log-interval', type=int, default=200, metavar='N',\n help='report interval')\nrandomhash = ''.join(str(time.time()).split('.'))\nparser.add_argument('--save', type=str, default=randomhash+'.pt',\n help='path to save the final model')\nparser.add_argument('--alpha', type=float, default=2,\n help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')\nparser.add_argument('--beta', type=float, default=1,\n help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')\nparser.add_argument('--wdecay', type=float, default=1.2e-6,\n help='weight decay applied to all weights')\nparser.add_argument('--resume', type=str, default='',\n help='path of model to resume')\nparser.add_argument('--optimizer', type=str, default='sgd',\n help='optimizer to use (sgd, adam)')\nparser.add_argument('--when', nargs=\"+\", type=int, default=[-1],\n help='When (which epochs) to divide the learning rate by 10 - accepts multiple')\nargs = parser.parse_args()\nargs.tied = True\n\nimport os\nimport hashlib\nfn = 'corpus.{}.data'.format(hashlib.md5(args.data.encode()).hexdigest())\nif os.path.exists(fn):\n print('Loading cached dataset...')\n corpus = torch.load(fn)\nelse:\n print('Producing dataset...')\n corpus = data.Corpus(args.data)\n torch.save(corpus, fn)\n\nntokens = len(corpus.dictionary)\nmodel = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop, args.tied)\n\nif args.cuda:\n model = model.cuda(3)\n\ndef model_load(fn):\n global model, criterion, optimizer\n with open(fn, 'rb') as f:\n model, criterion, optimizer = torch.load(f)\n\ndef new_tokenize(text):\n words = text.split()\n ids = torch.LongTensor(len(words))\n token = 0\n for word in words:\n if word in corpus.dictionary.word2idx:\n ids[token] = corpus.dictionary.word2idx[word]\n else:\n ids[token] = corpus.dictionary.word2idx['']\n token += 1\n return ids\n\ndef play(text, batch_size=1):\n model.eval()\n text = text.lower()\n text = re.sub('\\d+', 'N', text)\n punc = string.punctuation.replace(\".\", \"’—“”\")\n punc = punc.replace(\"'\", \"\")\n 
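The normalisation happening around this point in `play()`, lowercasing, mapping digit runs to `N`, stripping punctuation, then splitting clitics, is what lets raw user input share a vocabulary with Penn Treebank-style training text. A standalone sketch of the same steps (simplified: it ignores the unicode-quote handling the original folds into `punc`):

```python
import re
import string

def normalize(text):
    text = text.lower()
    text = re.sub(r'\d+', 'N', text)               # PTB maps digit runs to 'N'
    # Delete all ASCII punctuation except periods and apostrophes.
    punc = string.punctuation.replace('.', '').replace("'", '')
    text = text.translate(str.maketrans('', '', punc))
    for clitic in ("n't", "'s", "'ve", "'d", "'ll"):
        text = text.replace(clitic, ' ' + clitic)  # split clitics as PTB does
    return text

print(normalize("He didn't pay $100!"))  # -> "he did n't pay N"
```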
text = text.translate(str.maketrans('', '', punc))\n text = text.replace(\"n't\", \" n't\")\n text = text.replace(\"'s\", \" 's\")\n text = text.replace(\"'ve\", \" 've\")\n text = text.replace(\"'d\", \" 'd\")\n text = text.replace(\"'ll\", \" 'll\")\n data = new_tokenize(text).unsqueeze(1).cuda()\n hidden = model.init_hidden(batch_size)\n output, hidden = model(data, hidden)\n logits = model.decoder(output)\n logProba = nn.functional.log_softmax(logits, dim=1)\n unk_idx = corpus.dictionary.word2idx['']\n mini = torch.min(logProba)\n logProba[:,unk_idx] = mini\n pred_idxs = torch.argmax(logProba, dim=1)\n preds = [corpus.dictionary.idx2word[idx] for idx in pred_idxs]\n next_word = preds[-1]\n return next_word\n\n# Load the best saved model.\nmodel_load(args.save)\n\nwhile True:\n text = input(\"Hey, enter part of a sentence here: \")\n next_word = play(text)\n for i in range(70):\n text = text + \" \" + next_word\n next_word = play(text)\n print(\"Here's what we got:\\n:\", text)\n again = input(\"Press enter to play again! \")\n if again != \"\":\n break","sub_path":"play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"173576017","text":"import urllib2\nimport json\nclass Suggester(object):\n pass\n @property\n def keyword(self):\n return self.key\n @keyword.setter\n def keyword(self, value):\n self.key = value\n\n\n @property\n def suggestions(self):\n array = []\n url = 'http://suggest-market.yandex.ru/suggest-market?srv=market&part=' + self.key + '&pos=3&_=1419492563373'\n text = urllib2.urlopen(url).read().decode('utf-8')\n #text = page.read().decode('cp1251')\n c = json.loads(text)\n for i in c[1]:\n array.append(i)\n return array\n\n\ns = Suggester()\ns.key = 'диор'\nfor i in s.suggestions:\n print(i)\n \n","sub_path":"yandex.py","file_name":"yandex.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"525833284","text":"#Creating simple currency converter\n\n#Gather the parameter of interest\n#Construct the URL and send a GET request to it\n#For unsuccessful requests, print the error message\n#For successful request: extract the relevant data and calculate the result\n#Display the result to the user\n\nimport requests\n\nbase_url=\"https://api.exchangerate.host/\"\n\ndate=input(\"Please enter the date (in format 'yyyy-mm-dd' or 'latest'): \")\nbase=input(\"Convert from (currency): \")\ncurr=input(\"Convert to (currency): \")\nquantity=float(input(f'How much {base} you want to convert: '))\n\nurl=base_url + date + \"?base=\" + base + \"&symbol=\" +curr\n\nresponse=requests.get(url)\n\nif (response.ok==False):\n print(f'\\nError {response.status_code}')\n print(response.json()['error'])\nelse:\n data=response.json()\n rate=data['rates'][curr]\n\n result=quantity * rate\n\n print(f'{quantity} {base} is equal to {result} {curr}, based upon exchange rates on {date}')\n","sub_path":"Currency Converter.py","file_name":"Currency Converter.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"195454637","text":"from swampy.TurtleWorld import *\n\nworld = TurtleWorld()\nbob = Turtle()\n\n\ndef draw_square(t, length):\n\n for i in range(4):\n fd(t, length)\n lt(t)\n\n\ndraw_square(bob, 
35)\n\nwait_for_user()\n","sub_path":"ch4/4.3.py","file_name":"4.3.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"619113154","text":"from abc import abstractmethod\nfrom functools import reduce\nfrom de.dm.automation.devices.virtual_devices import VirtualDevice\n\n__author__ = 'd.muth'\n\n\nclass SensorDevice(VirtualDevice):\n @abstractmethod\n def get_sensor_value(self, **kwargs):\n \"\"\"\n Returns the actual sensor value (e.g. rainfall in mm the last hour).\n The returned datatype is not restricted.\n :return: Returns the actual sensor value.\n \"\"\"\n pass\n\n @staticmethod\n def get_hour_arg(__default=12, **kwargs):\n hours = __default\n try:\n if 'hours' in kwargs:\n hours = int(kwargs['hours'])\n except ValueError:\n hours = __default\n\n return hours\n\n @staticmethod\n def get_default_arg(__type, __default=0, **kwargs):\n res = __default\n try:\n if 'default' in kwargs:\n res = __type(kwargs['default'])\n except ValueError:\n res = __default\n\n return res\n\n\nclass WeatherRainfallSensor(SensorDevice):\n\n def __init__(self, config, device_name, connector):\n \"\"\"\n Constructor.\n :param config: The application config.\n :param device_name: The name of the device.\n :param connector: The openweather implementation to use.\n :return:\n \"\"\"\n super(WeatherRainfallSensor, self).__init__(config, device_name)\n self.connector = connector\n\n def get_sensor_value(self, **kwargs):\n \"\"\"\n Utilizes the given connector to retrieve the rainfall in l/m2 for the last 12h by default. If the\n parameter key 'hours' is given the rainfall is calculated for that value instead.\n :return: Returns the rainfall in l/m2 for the last hours.\n \"\"\"\n hours = SensorDevice.get_hour_arg(12, **kwargs)\n default = SensorDevice.get_default_arg(int, 0, **kwargs)\n stats = self.connector.get_weather(hours=hours)\n\n return reduce(lambda x, y: x+y, [s.rainfall_in_mm for s in stats], default)\n\n\nclass TemperatureMeanSensor(SensorDevice):\n def __init__(self, config, device_name, connector):\n \"\"\"\n Constructor.\n :param config: The application config.\n :param device_name: The name of the device.\n :param connector: The openweather implementation to use.\n :return:\n \"\"\"\n super(TemperatureMeanSensor, self).__init__(config, device_name)\n self.connector = connector\n\n def get_sensor_value(self, **kwargs):\n \"\"\"\n Utilizes the given connector to retrieve the average temperature.\n \"\"\"\n hours = SensorDevice.get_hour_arg(12, **kwargs)\n default = SensorDevice.get_default_arg(int, 10, **kwargs)\n stats = self.connector.get_weather(hours=hours)\n stats.sort(key=lambda tup: tup[0])\n\n sum_of_temps = 0\n cnt = 0\n for i in range(len(stats)):\n current_ts = stats[i].timestamp\n current_temperature = stats[i].temperature\n next_ts = stats[i + 1].timestamp if (i + 1) < len(stats) else current_ts + 15 * 60\n lasted_minutes = int((next_ts - current_ts) / 60)\n\n sum_of_temps += current_temperature * lasted_minutes\n cnt += lasted_minutes\n\n return default if cnt == 0 else sum_of_temps / cnt\n\n\nclass TemperatureForecastSensor(SensorDevice):\n def __init__(self, config, device_name, connector):\n \"\"\"\n Constructor.\n :param config: The application config.\n :param device_name: The name of the device.\n :param connector: The openweather implementation to use.\n :return:\n \"\"\"\n super(TemperatureForecastSensor, self).__init__(config, device_name)\n self.connector = connector\n\n def get_sensor_value(self, **kwargs):\n 
\"\"\"\n Utilizes the given connector to retrieve the average temperature.\n \"\"\"\n hours = SensorDevice.get_hour_arg(3, **kwargs)\n default = SensorDevice.get_default_arg(int, 10, **kwargs)\n stats = self.connector.get_forecast(hours=hours)\n\n if len(stats) == 0:\n return default\n else:\n stats.sort(key=lambda tup: tup[0])\n return stats[-1].temperature\n","sub_path":"src/main/python/de/dm/automation/devices/sensor_devices.py","file_name":"sensor_devices.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"303740181","text":"import torch\n#torch.manual_seed(442)\n\nfrom read_data import load, PAD_TOK\nfrom lstm import Seq2Seq\nimport time\nfrom tqdm import tqdm\n\nHIDDEN_SIZE = 64\nLEARNING_RATE = 0.5\nN_EPOCHS = 25\n\ndef calc_accuracy(output, target, pad_ind=0):\n _, preds = output.max(1)\n correct_count = 0\n total_count = 0\n for i in range(preds.shape[0]):\n for j in range(preds.shape[1]):\n if target[i, j] != pad_ind:\n total_count += 1\n if target[i, j] == preds[i, j]: correct_count += 1\n return correct_count / total_count\n\ndef train(data_iterable, model, opt, loss_func, pad_ind=0):\n # turn on dropouts, if you use them\n # model.train()\n epoch_losses = []\n epoch_accuracies = []\n num_batches = len(data_iterable)\n for batch in tqdm(data_iterable):\n opt.zero_grad()\n output = model.forward(batch.sent_1, batch.sent_2)\n loss = loss_func(output, batch.sent_2[1:])\n accuracy = calc_accuracy(output, batch.sent_2[1:], pad_ind=pad_ind)\n loss.backward()\n # TODO: clipping?\n opt.step()\n epoch_losses.append(loss)\n epoch_accuracies.append(accuracy)\n avg_loss = sum(epoch_losses)/len(epoch_losses)\n avg_accuracy = sum(epoch_accuracies)/len(epoch_accuracies)\n return avg_loss, avg_accuracy\n\ndef pad_out(output, target, pad_ind):\n output = output[:min(output.shape[0], target.shape[0]), :, :]\n new_out = torch.full([target.shape[0], output.shape[1], output.shape[2]], pad_ind)\n new_out[:output.shape[0], :, :] = output\n return new_out\n\ndef eval_model(data_iterable, model, loss_func, pad_ind=0):\n # turn off dropouts, if you use them\n # model.eval()\n epoch_losses = []\n epoch_accuracies = []\n # speed things up by not calculating gradients, we aren't backpropping\n with torch.no_grad():\n for batch in tqdm(data_iterable):\n output = model.forward(batch.sent_1, None) \n output = pad_out(output, batch.sent_2[1:], pad_ind)\n loss = loss_func(output, batch.sent_2[1:])\n accuracy = calc_accuracy(output, batch.sent_2[1:], pad_ind=pad_ind)\n epoch_losses.append(loss)\n epoch_accuracies.append(accuracy)\n avg_loss = sum(epoch_losses)/len(epoch_losses)\n avg_accuracy = sum(epoch_accuracies)/len(epoch_accuracies)\n return avg_loss, avg_accuracy\n\ndef indices_to_words(phrase, SENTENCES):\n return [SENTENCES.vocab.itos[word] for word in phrase]\n\n\ndef eval_once(data_iterable, model, SENTENCES):\n with torch.no_grad():\n for batch in tqdm(data_iterable):\n output = model.forward(batch.sent_1, None)\n _, preds = output.max(1)\n print('sent_1', indices_to_words(batch.sent_1, SENTENCES))\n print('sent_2', indices_to_words(batch.sent_2, SENTENCES))\n print('preds!', indices_to_words(preds, SENTENCES))\n\ndef train_main():\n train_iter, dev_iter, test_iter, SENTENCES = load()\n torch.save(SENTENCES.vocab.vectors, 'vectors.pth')\n\n PAD_IND = SENTENCES.vocab.stoi[PAD_TOK]\n\n model = Seq2Seq(HIDDEN_SIZE, SENTENCES)\n\n opt = torch.optim.Adadelta(model.parameters(), lr=LEARNING_RATE)\n\n # 
PERFORMANCE: change loss to softmax/cross-entropy loss?\n loss = torch.nn.CrossEntropyLoss(ignore_index=PAD_IND)\n # loss = torch.nn.MSELoss(ignore_index=SENTENCES.vocab.stoi[PAD_TOK])\n\n min_dev_loss = float('inf')\n\n start = time.time()\n for epoch in range(N_EPOCHS):\n training_loss, training_acc = train(train_iter, model, opt, loss,\n pad_ind=PAD_IND)\n dev_loss, dev_acc = eval_model(dev_iter, model, loss, pad_ind=PAD_IND)\n end = time.time()\n\n if dev_loss < min_dev_loss:\n min_dev_loss = dev_loss\n min_dev_acc = dev_acc\n torch.save(model.state_dict(), 'model.pth')\n\n print('Epoch {} | Elapsed: {:3.3}m'.format(epoch+1, (end - start) / 60))\n print(' Train Loss: {:.3}'.format(training_loss))\n print(' Train Acc: {:.3}'.format(training_acc))\n print(' Dev Loss: {:.3}'.format(dev_loss))\n print(' Dev Acc: {:.3}'.format(dev_acc))\n print()\n print('Total Elapsed Time: {:3.3}m'.format((end - start) / 60))\n print('Final Saved Dev Loss: {:.3}'.format(min_dev_loss))\n print('Final Saved Dev Acc: {:.3}'.format(min_dev_acc))\n\ndef load_main():\n print('loading data and vector embeddings')\n vectors = torch.load('vectors.pth')\n train_iter, dev_iter, test_iter, SENTENCES = load(train_batch_size=1)\n SENTENCES.vocab.vectors = vectors\n print('loaded data and vector embeddings')\n print('loading model from files')\n model = Seq2Seq(HIDDEN_SIZE, SENTENCES)\n model.load_state_dict(torch.load('model.pth'))\n model.eval()\n print('loaded model')\n #eval_once(dev_iter, model, SENTENCES) \n eval_once(train_iter, model, SENTENCES)\n # TODO: do predictions, print them, get accuracies\n\nif __name__ == '__main__':\n TRAIN = True\n if TRAIN:\n train_main()\n else:\n load_main()\n","sub_path":"lstm/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"167605246","text":"#!/usr/bin/env python\n\"\"\"\nThis script tests the different Spherical Harmonics Transforms on the Mars\ntopography data set\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../..\"))\nfrom pyshtools import shtools\n\n# set shtools plot style:\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../Common\"))\nfrom FigStyle import style_shtools\nmpl.rcParams.update(style_shtools)\n\n\n# ==== MAIN FUNCTION ====\n\ndef main():\n test_RealSpectralAnalysis()\n example()\n\n\ndef test_RealSpectralAnalysis():\n # ---- input parameters ----\n lmax = 5\n ls = np.arange(lmax + 1)\n mask = np.zeros((2, lmax + 1, lmax + 1), dtype=np.bool)\n for l in np.arange(lmax + 1):\n mask[:, l, :l + 1] = True\n mask[1, :, 0] = False\n\n print('\\n---- testing SHPower/DensityL, SHPowerSpectrum/Density ----')\n print('generating normal distributed coefficients with variance 1...')\n coeffs1 = np.random.normal(size=(2, lmax + 1, lmax + 1))\n coeffs1[np.invert(mask)] = 0.\n\n spec1 = np.array([shtools.SHPowerL(coeffs1, l) for l in ls])\n spec2 = shtools.SHPowerSpectrum(coeffs1)\n print('tot power computed with SHPowerL={:2.2f}'.format(np.sum(spec1)))\n print('tot power computed with SHPowerSpectrum={:2.2f}'.format(\n np.sum(spec2)))\n\n spec1 = np.array([shtools.SHPowerDensityL(coeffs1, l) for l in ls])\n spec2 = shtools.SHPowerSpectrumDensity(coeffs1)\n print('tot power computed with SHPowerDensityL={:2.2f}'.format(\n np.sum(spec1 * (2 * ls 
+ 1))))\n print('tot power computed with SHPowerSpectrumDensity={:2.2f}'.format(\n np.sum(spec2 * (2 * ls + 1))))\n\n print('\\n---- testing SHCrossPower/DensityL, ' +\n 'SHCrossPowerSpectrum/Density ----')\n print('generating two sets of normal distributed coefficients ' +\n 'with variance 1...')\n coeffs2 = np.random.normal(size=(2, lmax + 1, lmax + 1))\n coeffs2[np.invert(mask)] = 0.\n\n spec1 = np.array([shtools.SHCrossPowerL(coeffs1, coeffs2, l) for l in ls])\n spec2 = shtools.SHCrossPowerSpectrum(coeffs1, coeffs2)\n print('tot cpower computed with SHCrossPowerL={:2.2f}'.format(\n np.sum(spec1)))\n print('tot cpower computed with SHCrossPowerSpectrum={:2.2f}'.format(\n np.sum(spec2)))\n\n spec1 = np.array([shtools.SHCrossPowerDensityL(coeffs1, coeffs2, l)\n for l in ls])\n spec2 = shtools.SHCrossPowerSpectrumDensity(coeffs1, coeffs2)\n print('tot cpower computed with SHCrossPowerDensityL={:2.2f}'.format(\n np.sum(spec1 * (2 * ls + 1))))\n print('tot cpower computed with SHCrossPowerSpectrumDensity={:2.2f}'\n .format(np.sum(spec2 * (2 * ls + 1))))\n\n print('\\n---- testing SHAdmitCorr and SHConfidence ----')\n admit, dadmit, corr = shtools.SHAdmitCorr(coeffs1, coeffs2)\n confidence = np.array([shtools.SHConfidence(l, corr[l]) for l in ls])\n print('admittance:', admit)\n print('admittance error:', dadmit)\n print('correlation:', corr)\n print('confidence:', confidence)\n\n# ==== PLOT POWER SPECTRA ====\n\n\ndef example():\n \"\"\"\n example that plots the power spectrum of Mars topography data\n \"\"\"\n # --- input data filename ---\n infile = os.path.join(os.path.dirname(__file__),\n '../../ExampleDataFiles/MarsTopo719.shape')\n coeffs, lmax = shtools.SHRead(infile, 719)\n lmax = coeffs.shape[1] - 1\n\n # --- plot grid ---\n grid = shtools.MakeGridDH(coeffs, csphase=-1)\n fig_map = plt.figure()\n plt.imshow(grid)\n\n # ---- compute spectrum ----\n ls = np.arange(lmax + 1)\n pspectrum = shtools.SHPowerSpectrum(coeffs)\n pdensity = shtools.SHPowerSpectrumDensity(coeffs)\n\n # ---- plot spectrum ----\n fig_spectrum, ax = plt.subplots(1, 1)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('degree l')\n ax.grid(True, which='both')\n\n ax.plot(ls[1:], pspectrum[1:], label='power per degree l')\n ax.plot(ls[1:], pdensity[1:], label='power per degree l and order m')\n\n ax.legend()\n\n fig_map.savefig('SHRtopography_mars.png')\n fig_spectrum.savefig('SHRspectrum_mars.png')\n print('mars topography and spectrum saved')\n\n # plt.show()\n\n# ==== EXECUTE SCRIPT ====\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/python/GlobalSpectralAnalysis/SHRealSpectralAnalysis.py","file_name":"SHRealSpectralAnalysis.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"572566886","text":"## Import the required libraries\nimport networkx as nx # Graph library\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import linregress\nfrom random import *\nfrom math import *\nimport matplotlib.image as mpimg\nfrom operator import add\nimport pickle\n\n\n\n## USPS data\ndef load_usps(filename) :\n with open(filename ,\"r\") as f:\n f.readline()\n data = [ [float(x) for x in l.split()] for l in f if len(l.split())>2] \n tmp = np.array(data)\n return tmp[:,1:],tmp[:,0].astype(int)\n\ndef show_usps(data) : \n plt.imshow(data.reshape((16,16)),interpolation=\"nearest\",cmap=\"gray\")\n \n# load data\nfileroot = 
\"/Users/marieheurtevent/Desktop/Ponts/MachineLearning/Projet/USPS/USPS\"\nx_test, y_test = load_usps(fileroot + '_test.txt')\nx_train, y_train = load_usps(fileroot + '_train.txt')\n \ndef visual_ex() :\n rand_tab1 = np.array([randint(0,1000)%x_train.shape[0] for i in range (2)])\n rand_tab2 = np.array([randint(0,1000)%x_test.shape[0] for i in range (2)])\n \n plt.figure()\n plt.subplot(2,2,1)\n show_usps(x_train[rand_tab1[0]])\n plt.subplot(2,2,2)\n show_usps(x_train[rand_tab1[1]])\n plt.subplot(2,2,3)\n show_usps(x_test[rand_tab2[0]])\n plt.subplot(2,2,4)\n show_usps(x_test[rand_tab2[1]])\n plt.show()\n \n print(y_train[rand_tab1[0]],y_train[rand_tab1[1]],y_test[rand_tab2[0]],y_test[rand_tab2[1]])\n \nvisual_ex()\n\n\n# on ne prend que les images de 2 classes différentes\ndef usps_2classes(int1, int2, p) :\n x = np.concatenate((x_train,x_test))\n y = np.concatenate((y_train,y_test))\n index = np.sort(np.concatenate((np.where(y==int1)[0], np.where(y==int2)[0])))\n x = x[index]\n y = y[index]\n n = x.shape[0]\n s = int(n*p)\n return x[:s], y[:s], x[s:], y[s:]\n\n\nx_train12, y_train12, x_test12, y_test12 = usps_2classes(1,2, 0.1)\nstate = np.concatenate((y_train12,np.array([0]*len(y_test12))))\n\n\n\n################# Début de notre travail expérimental #########################\n\n\n## Premières fonctions\n# Affichage selon l'état des noeuds\ndef affichage(G, int1, int2):\n \"\"\" Affiche le graphe G en utilisant différentes couleurs selon l'état des noeuds\n Vert (\"1\"), bleu (sans label), rouge (\"2\")\"\"\"\n \n # Tableaux des noeuds dans chaque état\n state1 = [d[0] for d in G.nodes(data=True) if d[1]['state'] == int1]\n state2 = [d[0] for d in G.nodes(data=True) if d[1]['state'] == int2]\n stateWO = [d[0] for d in G.nodes(data=True) if d[1]['state'] == 0]\n\n # On s'assure que rien n'est affiché\n plt.clf()\n # Postionnement des noeuds\n pos = nx.spring_layout(G)\n # Affichage des noeuds\n nx.draw_networkx_nodes(G, pos, nodelist=state1, node_color='g', label=int1)\n nx.draw_networkx_nodes(G, pos, nodelist=state2, node_color='r', label=int2)\n nx.draw_networkx_nodes(G, pos, nodelist=stateWO, node_color='b', label=\"sans label\")\n # Affichage des liens\n nx.draw_networkx_edges(G, pos)\n # Affichage des labels\n nx.draw_networkx_labels(G, pos, labels=dict(zip(list(G.nodes()),list(G.nodes()))), fontsize = 8)\n # Légende\n plt.legend()\n # Affichage du nombre de noeuds dans chaque état\n print(\"Nombre de \", int1, \" = \", len(state1))\n print(\"Nombre de \", int2, \" = \", len(state2))\n print(\"Nombre sans label = \", len(stateWO))\n \n plt.show()\n \n\n# Create graph \ndef createG(Adj,states) : \n G = nx.from_numpy_matrix(Adj)\n nx.set_node_attributes(G, dict(zip(G.node(),list(states))),'state')\n return G\n \ndef createAdj(x_train12, x_test12, sigma) :\n x = np.concatenate((x_train12,x_test12))\n x2=np.dot(x,np.transpose(x))\n n = x.shape[0]\n Adj = np.zeros((n,n))\n for i in range (n) :\n for j in range (n) :\n #Adj[i][j] = sum((x[i] - x[j])**2)\n Adj[i][j] = x2[i][i] + x2[j][j] -2*np.dot(x[i],x[j])\n Adj = np.exp(-np.multiply(Adj,sigma))\n \n return Adj\n \n\ndef split(M,l,u):\n #Creation des sous-matrices\n M_ll=M[:l,:l]\n M_lu=M[:l,l:l+u]\n M_ul=M[l:l+u,:l]\n M_uu=M[l:l+u,l:l+u]\n return (M_ll, M_lu, M_ul, M_uu)\n\n\ndef labeliseG(Adj,states,l,u):\n d=np.sum(Adj,axis=1)\n D=np.diag(d)\n P=np.dot(np.linalg.inv(D),Adj)\n Adj_ll,Adj_lu,Adj_ul, Adj_uu=split(Adj,l,u)\n D_ll, D_lu, D_ul, D_uu = split(D,l,u)\n P_ll, P_lu, P_ul, P_uu = split(P,l,u)\n f_l=states[:l]\n 
#f_u=np.dot(np.linalg.inv(D_uu-Adj_uu),Adj_ul)\n    I=np.eye(u)\n    f_u=np.dot(np.linalg.inv(I-P_uu),P_ul)\n    f_u=np.dot(f_u,f_l)\n    return (np.concatenate((f_l,f_u)))\n\ndef plotErrorSigma(x_train12, x_test12, state) : \n    Adj = createAdj(x_train12,x_test12,0.1)\n    l=len(x_train12)\n    u=len(x_test12)\n    score=[]\n    test_sigma_log=np.arange(-2.2,0.1,0.1)\n    test_sigma=10**(test_sigma_log)\n    for sigma in test_sigma :\n        Adj = createAdj(x_train12, x_test12, sigma)\n        statesChapeau = labeliseG(Adj,state,l,u)\n        statesChapeau = np.where(statesChapeau>1.5, 2, 1)\n        print(sum(statesChapeau[len(y_train12):] == y_test12)/len(y_test12)*100, \"%\") \n        score.append(sum(statesChapeau[len(y_train12):] == y_test12)/len(y_test12)*100)\n    plt.figure()\n    plt.plot(test_sigma_log,score)\n    plt.xlabel(\"log sigma\")\n    plt.ylabel(\"Score\")\n    plt.title(\"Score as a function of sigma\")\n    \ndef plotErrorP(x_train, x_test) : \n    P = np.arange(0.1,0.5,0.1)\n    Score = []\n    for p in P :\n        x_train12, y_train12, x_test12, y_test12 = usps_2classes(1,2, p)\n        state = np.concatenate((y_train12,np.array([0]*len(y_test12))))\n        Adj = createAdj(x_train12,x_test12,0.1)\n        l=len(x_train12)\n        u=len(x_test12)\n        score=[]\n        test_sigma_log=np.arange(-1.2,0.1,0.1)\n        test_sigma=10**(test_sigma_log)\n        for sigma in test_sigma :\n            Adj = createAdj(x_train12, x_test12, sigma)\n            statesChapeau = labeliseG(Adj,state,l,u)\n            statesChapeau = np.where(statesChapeau>1.5, 2, 1)\n            print(sum(statesChapeau[len(y_train12):] == y_test12)/len(y_test12)*100, \"%\") \n            score.append(sum(statesChapeau[len(y_train12):] == y_test12)/len(y_test12)*100)\n        Score.append(max(score))\n    plt.figure()\n    plt.plot(P,Score)\n    plt.xlabel(\"Proportion of train vs. test data\")\n    plt.ylabel(\"Score\")\n    plt.title(\"Score as a function of the train/test data proportion\")\n\n\n\n\n\n\n\n\n","sub_path":"NewCode.py","file_name":"NewCode.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"416057961","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 21 18:01:13 2019\n\n@author: anil durgam\n\"\"\"\nimport os\n \nhtml = \"\"\ndef table(x):\n\tglobal html\n\t\"\"\" Create table with data in a multiline\n\tstring as first argument (x)\"\"\"\n\thtml = \"\"\n\thtml += \"<table>\"\n\tfor line in x.splitlines():\n\t\thtml += \"<tr>\"\n\t\tfor n in line.split():\n\t\t\thtml += f\"<td>{n}</td>\"\n\t\thtml += \"</tr>\"\n\thtml += \"</table>\"\n\treturn html\n \ndef create(a):\n\ttab = table(text.get(\"1.0\", tk.END))\n\ttext.delete(\"1.0\", tk.END)\n\ttext.insert(\"1.0\", tab)\n\tlabel['text'] = \"Now you can copy the html code for the table (ctrl + a)\"\n \ndef save_html():\n\tif html != \"\":\n\t\twith open(\"table.html\", \"w\") as file:\n\t\t\tfile.write(text.get(\"1.0\", tk.END))\n \ndef show_html():\n\tif os.path.exists(\"table.html\"):\n\t\tos.startfile(\"table.html\")\n \ndef convert_to_html():\n\thtml = table(text.get(\"1.0\",tk.END))\n\tclear()\n\ttext.insert(\"1.0\", html)\n \ndef clear():\n\ttext.delete(\"1.0\", tk.END)\n \nimport tkinter as tk\nroot = tk.Tk()\nroot.title(\"Html table converter\")\nlabel = tk.Label(root, text=\"Insert data here separated by space and press Ctrl+c to convert to html table:\")\nlabel.pack()\ntext = tk.Text(root)\ntext.pack()\ntext.bind(\"<Control-c>\", create)\ntext.focus()\n# create a toplevel menu \nmenubar = tk.Menu(root)\nmenubar.add_command(label=\"Convert - ctrl+c |\", command=convert_to_html)\nmenubar.add_command(label=\"Save |\", command=save_html)\nmenubar.add_command(label=\"Show |\", command=show_html)\nmenubar.add_command(label=\"Clear screen |\", command=clear)\n# display the menu\nroot.config(menu=menubar)\nroot.mainloop()","sub_path":"EXAMPLES/tksheet_example2.py","file_name":"tksheet_example2.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"35934975","text":"\"\"\"\r\nAssignment:\r\nUse repeated division by 2 to write the number 29(base-10) in binary notation.\r\n\r\nNotes:\r\nSuppose a is a non-negative integer.\r\nDivide a by 2 using the quotient-remainder theorem to obtain a quotient q[0] and a remainder r[0].\r\nIf the quotient is non-zero, divide by 2 again to obtain a quotient q[1] and a remainder r[1].\r\nContinue until a quotient of 0 is obtained.\r\nAt each stage, the remainder must be less than the divisor (2).\r\nEach remainder is always either 0 or 1.\r\n\r\nExample:\r\n 32/2: q[0] = 16, r[0] = 0\r\n 16/2: q[1] = 8, r[1] = 0\r\n 8/2: q[2] = 4, r[2] = 0\r\n 4/2: q[3] = 2, r[3] = 0\r\n 2/2: q[4] = 1, r[4] = 0\r\n 1/2: q[5] = 0, r[5] = 1\r\n\r\nVariables:\r\n\r\nuser_input = user input\r\ninput_div = quotient of division\r\ninput_rem = remainder from division\r\n\r\nTablemates/Groupmates:\r\nPedro, Jin\r\n\"\"\"\r\n\r\n\r\nclass BinaryConverter:\r\n    def __init__(self):\r\n        self.iterations = 0\r\n        self.binary_link_circuit = []\r\n\r\n    def binary_divaido(self, user_input):\r\n\r\n        # Divide user_input by 2, floor/integer division (not floating point.)\r\n        input_div = user_input // 2\r\n\r\n        \"\"\"\r\n        Find remainder by subtracting 2 * input_div from user_input\r\n        i.e. If user_input was 5:\r\n            input_div = 5 // 2  # input_div = 2\r\n            input_rem = 5 - (2 * input_div)  # input_rem = 1\r\n        \"\"\"\r\n        input_rem = user_input - (input_div * 2)\r\n\r\n        self.binary_link_circuit.append(input_rem)\r\n\r\n        self.iterations += 1\r\n\r\n        # Evaluate if input_div == 0. 
If it is, we are done; otherwise recurse on the quotient.\r\n        if input_div == 0:\r\n            return self.binary_link_circuit\r\n        else:\r\n            return self.binary_divaido(input_div)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    bc = BinaryConverter()\r\n    user_in = int(input(\"Please enter a numerical value: \"))\r\n    result = bc.binary_divaido(user_in)\r\n    clean_result = \"\"\r\n    for rem in reversed(range(len(result))):\r\n        clean_result += str(result[rem])\r\n    print(f\"Entered Value: {user_in}\\N{SUBSCRIPT ONE}\\N{SUBSCRIPT ZERO}\\n\"\r\n          f\"Array: {result}\\n\"\r\n          f\"Result: {clean_result}\\N{SUBSCRIPT TWO}\")\r\n","sub_path":"BinaryConverter_CSIS_240_6160.py","file_name":"BinaryConverter_CSIS_240_6160.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"517282889","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\ndef merge_sort_with_swaps(arr, swaps):\n    n = len(arr)\n    if n <= 1:\n        return arr, swaps\n\n    # recursive case: sort each half and count its inversions\n    p1, s1 = merge_sort_with_swaps(arr[:n//2], 0)\n    p2, s2 = merge_sort_with_swaps(arr[n//2:], 0)\n\n    more_swaps = 0\n    i1 = 0\n    i2 = 0\n    n1 = len(p1)\n    n2 = len(p2)\n\n    # count the cross inversions between the two sorted halves\n    while i1 < n1 or i2 < n2:\n        if i1 < n1 and i2 < n2:\n            if p1[i1] <= p2[i2]:\n                i1 += 1\n            else:\n                more_swaps += n1 - i1\n                i2 += 1\n        else:\n            break\n\n    return sorted(arr), swaps + s1 + s2 + more_swaps\n\n\n# Complete the countInversions function below.\ndef countInversions(arr):\n    _, swaps = merge_sort_with_swaps(arr, 0)\n    return swaps\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    t = int(input())\n\n    for t_itr in range(t):\n        n = int(input())\n\n        arr = list(map(int, input().rstrip().split()))\n\n        result = countInversions(arr)\n\n        fptr.write(str(result) + '\\n')\n\n    fptr.close()\n\n\n\"\"\"\ntake sorted array O(nlogn)\ncompare elements in sorted and arr\nfor each element e,\n    perform k swaps to get e in arr into the correct position according to sorted\n    add k to the running total, and move onto the next element\n\nO(n^2)\n\"\"\"\n\n\"\"\"\nswaps are something that sorting inherently does\nwe need to implement sorting ourselves to keep track of a counter \n\"\"\"","sub_path":"HackerRank/ctci_merge_sort.py","file_name":"ctci_merge_sort.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"245713807","text":"#!/usr/bin/env python\nimport csv\n\nwith open('DATA/airport_boardings.csv') as boardings_in:\n    rdr = csv.reader(boardings_in)\n    headers = next(rdr)\n    for name, code, rank2001, total2001, rank2010, total2010, rank2011, total, pct_change1, pct_change2 in rdr:\n        print(code, rank2010)\nprint()\n\nwith open('DATA/knights.txt') as knights_in:\n    rdr = csv.reader(knights_in, delimiter=\":\")\n    for row in rdr:\n        print(row)\nprint()\n\ndata = []\nwith open('DATA/airport_boardings.csv') as boardings_in:\n    rdr = csv.DictReader(boardings_in)\n    for row in rdr:\n        data.append(row)\n# print(row['Code'], row['2010 Rank'])\n\nprint(data[0])\nprint(data[-1])\n\nprint()\n","sub_path":"read_airport_csv.py","file_name":"read_airport_csv.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"534069635","text":"import os\nimport csv\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nprint(dir_path)\nos.chdir(dir_path)\n\nelection_data = os.path.join('Resources', 
'election_data.csv')\nprint(election_data)\n\nprint(\"Election Results\")\nprint(\"-----------------------\")\n\ntotalVotes = 0\ncandidates = []\ncandidateVotes = []\n\nwith open(election_data, \"r\", encoding=\"utf-8\") as csvfile:\n csvReader = csv.reader(csvfile,delimiter=\",\")\n \n next(csvfile)\n \n for row in csvReader:\n totalVotes = totalVotes + 1 #for total votes\n \n #Candidate List\n candidateVotes.append(row[2])\n [candidates.append(x) for x in candidateVotes if x not in candidates]\n\n #Number of votes per candidate\n khanCount = candidateVotes.count(candidates[0])\n correyCount = candidateVotes.count(candidates[1])\n liCount = candidateVotes.count(candidates[2])\n otooleyCount = candidateVotes.count(candidates[3])\n\n#Percentage Won\n #khanPercent = round((khanCount) / (totalVotes), 3) * 100\n khanPercent = (khanCount) / (totalVotes) * 100\n correyPercent = (correyCount) / (totalVotes) * 100\n liPercent = (liCount) / (totalVotes) * 100\n otooletPercent = (otooleyCount) / (totalVotes) * 100\n\nvotePercent = [khanPercent, correyPercent, liPercent, otooletPercent]\nvoteCount = [khanCount, correyCount, liCount, otooleyCount]\n\nwinner = max(voteCount)\nwinnerIndex = voteCount.index(winner)\n\n#print(votes) \n#print(khanCount)\n#print(candidateVotes)\n#print(voteCount)\n#print(candidates)\nprint(f\"Total Votes: {totalVotes}\")\nprint(\"-----------------------\")\nprint(f'{candidates[0]}: {(\"%.3f\" % votePercent[0])}% ({voteCount[0]})')\nprint(f'{candidates[1]}: {(\"%.3f\" % votePercent[1])}% ({voteCount[1]})')\nprint(f'{candidates[2]}: {(\"%.3f\" % votePercent[2])}% ({voteCount[2]})')\nprint(f'{candidates[3]}: {(\"%.3f\" % votePercent[3])}% ({voteCount[3]})')\nprint(\"-----------------------\")\nprint(f'Winner: {candidates[winnerIndex]}')\nprint(\"-----------------------\")\n\noutput_path = os.path.join(\"analysis\", \"election_results.txt\")\n\nwith open(output_path, 'w') as file:\n file.write('Election Results\\n')\n file.write(\"-----------------------\\n\")\n file.write(f\"Total Votes: {totalVotes}\\n\")\n file.write(\"-----------------------\\n\")\n file.write(f'{candidates[0]}: {(\"%.3f\" % votePercent[0])}% ({voteCount[0]})\\n')\n file.write(f'{candidates[1]}: {(\"%.3f\" % votePercent[1])}% ({voteCount[1]})\\n')\n file.write(f'{candidates[2]}: {(\"%.3f\" % votePercent[2])}% ({voteCount[2]})\\n')\n file.write(f'{candidates[3]}: {(\"%.3f\" % votePercent[3])}% ({voteCount[3]})\\n')\n file.write(\"-----------------------\\n\")\n file.write(f'Winner: {candidates[winnerIndex]}\\n')\n file.write(\"-----------------------\")","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"345876421","text":"import xml.etree.ElementTree as ET\nimport os\n\ndef getAlter(alter): #for flats and sharps\n\tif alter == '1':\n\t\talter = '#'\n\telif(alter == '-1'):\n\t\talter = 'b'\n\telif(alter == '2'):\n\t\talter = 'x'\n\telif(alter == '-2'):\n\t\talter = 'y'\n\treturn alter\n\ndef same(st): #for notes representing the same thing\n\tif(st == 'Db' or st == 'Bx'):\n\t\tst = 'C#'\n\telif(st == 'Eb' or st == 'Fy'):\n\t\tst = 'D#'\n\telif(st == 'E#' or st == 'Gy'):\n\t\tst = 'F'\n\telif(st == 'Fb' or st == 'Dx'):\n\t\tst = 'E'\n\telif(st == 'Gb' or st == 'Ex'):\n\t\tst = 'F#'\n\telif(st == 'Ab'):\n\t\tst = 'G#'\n\telif(st == 'Bb' or st == 'Cy'):\n\t\tst = 'A#'\n\telif(st == 'B#' or st == 'Dy'):\n\t\tst = 'C'\n\telif(st == 'Cb' or st == 'Ax'):\n\t\tst = 
'B'\n\telif(st == 'Ey' or st == 'Cx'):\n\t\tst = 'D'\n\telif(st == 'Ay' or st == 'Fx'):\n\t\tst = 'G'\n\telif(st == 'By' or st == 'Gx'):\n\t\tst = 'A'\n\treturn st\n\nf_part1 = open('part1', 'w')\nf_part2 = open('part2', 'w')\ncnt = 0\nfor file in os.listdir('/home/vasudha/TeamCFSM/data/'):\n\tprint(file)\n\tpath = os.path.join('/home/vasudha/TeamCFSM/data/', file)\n\tprint(cnt)\n\tcnt = cnt+1\n\ttree = ET.parse(path)\n\troot = tree.getroot()\n\n\tpart = root.find('part')\n\n\tmeasures = part.findall('measure')\n\tattributes = measures[0].find('attributes')\n\tdiv = int(attributes.find('divisions').text)\n\tbasic_unit = 64\n\tdur_unit = basic_unit/(4*div)\n\t\n\tfor i in range(0,len(measures)):\n\t\tflag = 0\n\t\tmeasure = measures[i]\n\t\tl = list(measures[i].iter())\n\t\tbackup = measures[i].find('backup')\n\t\tnotes = measure.findall('note')\n\t\tif notes is None: continue\n\t\tif backup is not None:\n\t\t\tidx = l.index(backup) #get index of backup tag\n\t\telse: flag = 1\n\t\tj = 0\n\t\t#for melody line\n\t\twhile(j < len(notes) and (flag == 1 or l.index(notes[j]) < idx)):\n\t\t\tif len(notes[j].findall('rest')) == 1:\n\t\t\t\ttemp = int(notes[j].find('duration').text)\n\t\t\t\ttemp = temp*dur_unit\n\t\t\t\tfor k in range(0, temp):\n\t\t\t\t\tf_part1.write('rest')\n\t\t\t\t\tf_part1.write(' ')\n\t\t\telse: #is a note\n\t\t\t\ttemp = int(notes[j].find('duration').text)\n\t\t\t\ttemp = temp*dur_unit\n\t\t\t\t\n\t\t\t\tif notes[j].find('pitch').find('alter') is not None:\n\t\t\t\t\talter = getAlter(notes[j].find('pitch').find('alter').text)\t\t\t\t\n\t\t\t\t\tst = notes[j].find('pitch').find('step').text + alter\n\t\t\t\t\tst = same(st)\n\t\t\t\t\tst = st + notes[j].find('pitch').find('octave').text\n\t\t\t\telse:\n\t\t\t\t\tst = notes[j].find('pitch').find('step').text + notes[j].find('pitch').find('octave').text\n\t\t\t\twhile(j None:\n super().register_options(register)\n register(\n \"--resolve\",\n type=list,\n member_type=str,\n advanced=False,\n help=(\n \"Only generate lockfiles for the specified resolve(s).\\n\\n\"\n \"Resolves are the logical names for the different lockfiles used in your project. \"\n \"For your own code's dependencies, these come from the option \"\n \"`[python].experimental_resolves_to_lockfiles`. 
For tool lockfiles, resolve \"\n \"names are the options scope for that tool such as `black`, `pytest`, and \"\n \"`mypy-protobuf`.\\n\\n\"\n \"For example, you can run `./pants generate-lockfiles --resolve=black \"\n \"--resolve=pytest --resolve=data-science` to only generate lockfiles for those \"\n \"two tools and your resolve named `data-science`.\\n\\n\"\n \"If you specify an invalid resolve name, like 'fake', Pants will output all \"\n \"possible values.\\n\\n\"\n \"If not specified, Pants will generate lockfiles for all resolves.\"\n ),\n )\n register(\n \"--custom-command\",\n advanced=True,\n type=str,\n default=None,\n help=(\n \"If set, lockfile headers will say to run this command to regenerate the lockfile, \"\n \"rather than running `./pants generate-lockfiles --resolve=` like normal.\"\n ),\n )\n\n @property\n def resolve_names(self) -> tuple[str, ...]:\n return tuple(self.options.resolve)\n\n @property\n def custom_command(self) -> str | None:\n return cast(\"str | None\", self.options.custom_command)\n\n\n# --------------------------------------------------------------------------------------\n# Generic lockfile generation\n# --------------------------------------------------------------------------------------\n\n\n@dataclass(frozen=True)\nclass PythonLockfile:\n digest: Digest\n resolve_name: str\n path: str\n\n\n@dataclass(frozen=True)\nclass PythonLockfileRequest:\n requirements: FrozenOrderedSet[str]\n interpreter_constraints: InterpreterConstraints\n resolve_name: str\n lockfile_dest: str\n # Only kept for `[python].experimental_lockfile`, which is not using the new\n # \"named resolve\" semantics yet.\n _description: str | None = None\n _regenerate_command: str | None = None\n\n @classmethod\n def from_tool(\n cls,\n subsystem: PythonToolRequirementsBase,\n interpreter_constraints: InterpreterConstraints | None = None,\n *,\n extra_requirements: Iterable[str] = (),\n ) -> PythonLockfileRequest:\n \"\"\"Create a request for a dedicated lockfile for the tool.\n\n If the tool determines its interpreter constraints by using the constraints of user code,\n rather than the option `--interpreter-constraints`, you must pass the arg\n `interpreter_constraints`.\n \"\"\"\n if not subsystem.uses_lockfile:\n return cls(\n FrozenOrderedSet(),\n InterpreterConstraints(),\n resolve_name=subsystem.options_scope,\n lockfile_dest=subsystem.lockfile,\n )\n return cls(\n requirements=FrozenOrderedSet((*subsystem.all_requirements, *extra_requirements)),\n interpreter_constraints=(\n interpreter_constraints\n if interpreter_constraints is not None\n else subsystem.interpreter_constraints\n ),\n resolve_name=subsystem.options_scope,\n lockfile_dest=subsystem.lockfile,\n )\n\n @property\n def requirements_hex_digest(self) -> str:\n \"\"\"Produces a hex digest of the requirements input for this lockfile.\"\"\"\n return calculate_invalidation_digest(self.requirements)\n\n\n@rule(desc=\"Generate lockfile\", level=LogLevel.DEBUG)\nasync def generate_lockfile(\n req: PythonLockfileRequest,\n poetry_subsystem: PoetrySubsystem,\n generate_lockfiles_subsystem: GenerateLockfilesSubsystem,\n) -> PythonLockfile:\n pyproject_toml = create_pyproject_toml(req.requirements, req.interpreter_constraints).encode()\n pyproject_toml_digest, launcher_digest = await MultiGet(\n Get(Digest, CreateDigest([FileContent(\"pyproject.toml\", pyproject_toml)])),\n Get(Digest, CreateDigest([POETRY_LAUNCHER])),\n )\n\n poetry_pex = await Get(\n VenvPex,\n PexRequest(\n output_filename=\"poetry.pex\",\n 
internal_only=True,\n requirements=poetry_subsystem.pex_requirements(),\n interpreter_constraints=poetry_subsystem.interpreter_constraints,\n main=EntryPoint(PurePath(POETRY_LAUNCHER.path).stem),\n sources=launcher_digest,\n ),\n )\n\n # WONTFIX(#12314): Wire up Poetry to named_caches.\n # WONTFIX(#12314): Wire up all the pip options like indexes.\n poetry_lock_result = await Get(\n ProcessResult,\n VenvPexProcess(\n poetry_pex,\n argv=(\"lock\",),\n input_digest=pyproject_toml_digest,\n output_files=(\"poetry.lock\", \"pyproject.toml\"),\n description=req._description or f\"Generate lockfile for {req.resolve_name}\",\n # Instead of caching lockfile generation with LMDB, we instead use the invalidation\n # scheme from `lockfile_metadata.py` to check for stale/invalid lockfiles. This is\n # necessary so that our invalidation is resilient to deleting LMDB or running on a\n # new machine.\n #\n # We disable caching with LMDB so that when you generate a lockfile, you always get\n # the most up-to-date snapshot of the world. This is generally desirable and also\n # necessary to avoid an awkward edge case where different developers generate different\n # lockfiles even when generating at the same time. See\n # https://github.com/pantsbuild/pants/issues/12591.\n cache_scope=ProcessCacheScope.PER_SESSION,\n ),\n )\n poetry_export_result = await Get(\n ProcessResult,\n VenvPexProcess(\n poetry_pex,\n argv=(\"export\", \"-o\", req.lockfile_dest),\n input_digest=poetry_lock_result.output_digest,\n output_files=(req.lockfile_dest,),\n description=(\n f\"Exporting Poetry lockfile to requirements.txt format for {req.resolve_name}\"\n ),\n level=LogLevel.DEBUG,\n ),\n )\n\n initial_lockfile_digest_contents = await Get(\n DigestContents, Digest, poetry_export_result.output_digest\n )\n # TODO(#12314) Improve error message on `Requirement.parse`\n metadata = LockfileMetadata.new(\n req.interpreter_constraints,\n {PipRequirement.parse(i) for i in req.requirements},\n )\n lockfile_with_header = metadata.add_header_to_lockfile(\n initial_lockfile_digest_contents[0].content,\n regenerate_command=(\n generate_lockfiles_subsystem.custom_command\n or req._regenerate_command\n or f\"./pants generate-lockfiles --resolve={req.resolve_name}\"\n ),\n )\n final_lockfile_digest = await Get(\n Digest, CreateDigest([FileContent(req.lockfile_dest, lockfile_with_header)])\n )\n return PythonLockfile(final_lockfile_digest, req.resolve_name, req.lockfile_dest)\n\n\n# --------------------------------------------------------------------------------------\n# User lockfiles\n# --------------------------------------------------------------------------------------\n\n\nclass _SpecifiedUserResolves(Collection[str]):\n pass\n\n\nclass _UserLockfileRequests(Collection[PythonLockfileRequest]):\n pass\n\n\n@rule\nasync def setup_user_lockfile_requests(\n requested: _SpecifiedUserResolves, all_targets: AllTargets, python_setup: PythonSetup\n) -> _UserLockfileRequests:\n # First, associate all resolves with their consumers.\n resolves_to_roots = defaultdict(list)\n for tgt in all_targets:\n if not tgt.has_field(PythonResolveField):\n continue\n tgt[PythonResolveField].validate(python_setup)\n resolve = tgt[PythonResolveField].value\n if resolve is None:\n continue\n resolves_to_roots[resolve].append(tgt.address)\n\n # Expand the resolves for all specified.\n transitive_targets_per_resolve = await MultiGet(\n Get(TransitiveTargets, TransitiveTargetsRequest(resolves_to_roots[resolve]))\n for resolve in requested\n )\n 
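# Pool the requirement fields and interpreter constraints from each resolve's transitive closure.\n    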
pex_requirements_per_resolve = []\n interpreter_constraints_per_resolve = []\n for transitive_targets in transitive_targets_per_resolve:\n req_fields = []\n ic_fields = []\n for tgt in transitive_targets.closure:\n if tgt.has_field(PythonRequirementsField):\n req_fields.append(tgt[PythonRequirementsField])\n if tgt.has_field(InterpreterConstraintsField):\n ic_fields.append(tgt[InterpreterConstraintsField])\n pex_requirements_per_resolve.append(\n PexRequirements.create_from_requirement_fields(req_fields)\n )\n interpreter_constraints_per_resolve.append(\n InterpreterConstraints.create_from_compatibility_fields(ic_fields, python_setup)\n )\n\n requests = (\n PythonLockfileRequest(\n requirements.req_strings,\n interpreter_constraints,\n resolve_name=resolve,\n lockfile_dest=python_setup.resolves_to_lockfiles[resolve],\n )\n for resolve, requirements, interpreter_constraints in zip(\n requested, pex_requirements_per_resolve, interpreter_constraints_per_resolve\n )\n )\n return _UserLockfileRequests(requests)\n\n\n# --------------------------------------------------------------------------------------\n# Lock goal\n# --------------------------------------------------------------------------------------\n\n\nclass GenerateLockfilesGoal(Goal):\n subsystem_cls = GenerateLockfilesSubsystem\n\n\n@goal_rule\nasync def generate_lockfiles_goal(\n workspace: Workspace,\n union_membership: UnionMembership,\n generate_lockfiles_subsystem: GenerateLockfilesSubsystem,\n python_setup: PythonSetup,\n python_repos: PythonRepos,\n) -> GenerateLockfilesGoal:\n if python_repos.repos:\n warn_python_repos(\"repos\")\n if python_repos.indexes != [python_repos.pypi_index]:\n warn_python_repos(\"indexes\")\n\n specified_user_resolves, specified_tool_sentinels = determine_resolves_to_generate(\n python_setup.resolves_to_lockfiles.keys(),\n union_membership[PythonToolLockfileSentinel],\n generate_lockfiles_subsystem.resolve_names,\n )\n\n specified_user_requests = await Get(\n _UserLockfileRequests, _SpecifiedUserResolves(specified_user_resolves)\n )\n specified_tool_requests = await MultiGet(\n Get(PythonLockfileRequest, PythonToolLockfileSentinel, sentinel())\n for sentinel in specified_tool_sentinels\n )\n applicable_tool_requests = filter_tool_lockfile_requests(\n specified_tool_requests,\n resolve_specified=bool(generate_lockfiles_subsystem.resolve_names),\n )\n\n results = await MultiGet(\n Get(PythonLockfile, PythonLockfileRequest, req)\n for req in (*specified_user_requests, *applicable_tool_requests)\n )\n\n merged_digest = await Get(Digest, MergeDigests(res.digest for res in results))\n workspace.write_digest(merged_digest)\n for result in results:\n logger.info(f\"Wrote lockfile for the resolve `{result.resolve_name}` to {result.path}\")\n\n return GenerateLockfilesGoal(exit_code=0)\n\n\ndef warn_python_repos(option: str) -> None:\n logger.warning(\n f\"The option `[python-repos].{option}` is configured, but it does not currently work \"\n \"with lockfile generation. Lockfile generation will fail if the relevant requirements \"\n \"cannot be located on PyPI.\\n\\n\"\n \"If lockfile generation fails, you can disable lockfiles by setting \"\n \"`[tool].lockfile = ''`, e.g. setting `[black].lockfile`. You can also manually \"\n \"generate a lockfile, such as by using pip-compile or `pip freeze`. Set the \"\n \"`[tool].lockfile` option to the path you manually generated. 
When manually maintaining \"\n \"lockfiles, set `[python].invalid_lockfile_behavior = 'ignore'.\"\n )\n\n\nclass AmbiguousResolveNamesError(Exception):\n def __init__(self, ambiguous_names: list[str]) -> None:\n if len(ambiguous_names) == 1:\n first_paragraph = (\n \"A resolve name from the option \"\n \"`[python].experimental_resolves_to_lockfiles` collides with the name of a \"\n f\"tool resolve: {ambiguous_names[0]}\"\n )\n else:\n first_paragraph = (\n \"Some resolve names from the option \"\n \"`[python].experimental_resolves_to_lockfiles` collide with the names of \"\n f\"tool resolves: {sorted(ambiguous_names)}\"\n )\n super().__init__(\n f\"{first_paragraph}\\n\\n\"\n \"To fix, please update `[python].experimental_resolves_to_lockfiles` to use \"\n \"different resolve names.\"\n )\n\n\ndef determine_resolves_to_generate(\n all_user_resolves: Iterable[str],\n all_tool_sentinels: Iterable[type[PythonToolLockfileSentinel]],\n requested_resolve_names: Sequence[str],\n) -> tuple[list[str], list[type[PythonToolLockfileSentinel]]]:\n \"\"\"Apply the `--resolve` option to determine which resolves are specified.\n\n Return a tuple of `(user_resolves, tool_lockfile_sentinels)`.\n \"\"\"\n resolve_names_to_sentinels = {\n sentinel.options_scope: sentinel for sentinel in all_tool_sentinels\n }\n\n ambiguous_resolve_names = [\n resolve_name\n for resolve_name in all_user_resolves\n if resolve_name in resolve_names_to_sentinels\n ]\n if ambiguous_resolve_names:\n raise AmbiguousResolveNamesError(ambiguous_resolve_names)\n\n if not requested_resolve_names:\n return list(all_user_resolves), list(all_tool_sentinels)\n\n specified_user_resolves = []\n specified_sentinels = []\n unrecognized_resolve_names = []\n for resolve_name in requested_resolve_names:\n sentinel = resolve_names_to_sentinels.get(resolve_name)\n if sentinel:\n specified_sentinels.append(sentinel)\n elif resolve_name in all_user_resolves:\n specified_user_resolves.append(resolve_name)\n else:\n unrecognized_resolve_names.append(resolve_name)\n\n if unrecognized_resolve_names:\n raise UnrecognizedResolveNamesError(\n unrecognized_resolve_names,\n {*all_user_resolves, *resolve_names_to_sentinels.keys()},\n description_of_origin=\"the option `--generate-lockfiles-resolve`\",\n )\n\n return specified_user_resolves, specified_sentinels\n\n\ndef filter_tool_lockfile_requests(\n specified_requests: Sequence[PythonLockfileRequest], *, resolve_specified: bool\n) -> list[PythonLockfileRequest]:\n result = []\n for req in specified_requests:\n if req.lockfile_dest not in (NO_TOOL_LOCKFILE, DEFAULT_TOOL_LOCKFILE):\n result.append(req)\n continue\n if resolve_specified:\n resolve = req.resolve_name\n raise ValueError(\n f\"You requested to generate a lockfile for {resolve} because \"\n \"you included it in `--generate-lockfiles-resolve`, but \"\n f\"`[{resolve}].lockfile` is set to `{req.lockfile_dest}` \"\n \"so a lockfile will not be generated.\\n\\n\"\n f\"If you would like to generate a lockfile for {resolve}, please \"\n f\"set `[{resolve}].lockfile` to the path where it should be \"\n \"generated and run again.\"\n )\n\n return result\n\n\ndef rules():\n return collect_rules()\n","sub_path":"src/python/pants/backend/python/goals/lockfile.py","file_name":"lockfile.py","file_ext":"py","file_size_in_byte":18262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"545223067","text":"# -*- coding:utf-8 -*-\n# author: Kai Zhang\n# class FTPServer\nimport socketserver\nimport json\nimport 
os\nimport hashlib\n\nfrom conf.setting import BASE_DIR\n\n\nclass FTPServer(socketserver.BaseRequestHandler):\n    '''server ftp'''\n\n    def handle(self):\n        while True:\n            try:\n                self.data = self.request.recv(1024)\n                print('Client address: %s' % (self.client_address[0]))\n                if not self.data:\n                    print('Client disconnected')\n                    break\n                else:\n                    self.data = json.loads(self.data.decode())\n                    func = self.data['func']\n                    if hasattr(self, func):\n                        getattr(self, func)()\n                    else:\n                        print('Requested function does not exist')\n            except ConnectionResetError as e:\n                print('Client disconnected')\n                break\n\n    def access(self):\n        account = self.data['account']\n        password = self.data['password']\n        path = BASE_DIR + r'//database//user//' + account + '.json'\n        if os.path.isfile(path):\n            f = open(path, 'r')\n            data = json.loads(f.read())\n            f.close()\n            if password == data['password']:\n                self.request.send(b'100')\n                self.disk_path = data['disk_path']\n                self.max_size = float(data['disk_size'])\n                print('User %s logged in successfully: %s' % (account, self.client_address[0]))\n            else:\n                self.request.send(b'101')\n        else:\n            self.request.send(b'102')\n\n    def dir(self):\n        path = self.disk_path + self.data['path']\n        data = ''\n        print('Reading directory:', path)\n        for i in os.listdir(path):\n            data += i + '\\n'\n        data = data.encode('utf-8')\n        info = {\n            'size': len(data)\n        }\n        self.request.send(json.dumps(info).encode('utf-8'))\n        if info['size'] == 0:\n            print('Directory is empty')\n        else:\n            self.request.recv(1024)\n            self.request.send(data)\n\n    def cd(self):\n        info = {\n            'num': 201\n        }\n        if self.data['cd_path'] == '.':\n            info['num'] = 200\n            print('Moved up to the parent directory')\n        else:\n            new_path = self.disk_path + self.data['path'] + self.data['cd_path'] + r'//'\n            if os.path.isdir(new_path):\n                info['num'] = 200\n                print('Moved to new directory:', new_path)\n        self.request.send(json.dumps(info).encode('utf-8'))\n\n    def put(self):\n        info = {\n            'num': 303\n        }\n        size = 0\n        root_path = self.disk_path\n        for root, dirs, files in os.walk(root_path):\n            for f in files:\n                size += os.path.getsize(os.path.join(root, f))\n        size = size + self.data['size']\n        recv_flag = False\n        path = root_path\n        if size / 1024 / 1024 > self.max_size:\n            info['num'] = 302\n            self.request.send(json.dumps(info).encode('utf-8'))\n        else:\n            path = root_path + self.data['path'] + self.data['name']\n            if os.path.isfile(path):\n                info['num'] = 301\n                self.request.send(json.dumps(info).encode('utf-8'))\n                data = json.loads(self.request.recv(1024).decode())\n                if not data['recover']:\n                    path = root_path + self.data['path'] + self.data['name'] + '.new'\n                recv_flag = True\n                self.request.send('Ready!'.encode('utf-8'))\n            else:\n                info['num'] = 300\n                self.request.send(json.dumps(info).encode('utf-8'))\n                recv_flag = True\n        if recv_flag:\n            print('Client uploading file to path', path)\n            f = open(path, 'wb')\n            recv_size = 0\n            file_md5 = hashlib.md5()\n            while recv_size < self.data['size']:\n                if self.data['size'] - recv_size > 1024:\n                    size = 1024\n                else:\n                    size = self.data['size'] - recv_size\n                data = self.request.recv(size)\n                recv_size += len(data)\n                file_md5.update(data)\n                f.write(data)\n            f.close()\n            md5 = self.request.recv(1024).decode()\n            if file_md5.hexdigest() == md5:\n                print('Receive complete:', path)\n                self.request.send('Receive complete!'.encode('utf-8'))\n            else:\n                print('File corrupted!')\n                self.request.send('File corrupted!'.encode('utf-8'))\n                os.remove(path)\n\n    def get(self):\n        info = {\n            'num': 402\n        }\n        path = self.disk_path + self.data['path'] + self.data['name']\n        print('Client requested file at path', path)\n        if os.path.isfile(path):\n            size = os.path.getsize(path)\n            info['num'] = 400\n            info['size'] = size\n            send_flag = True\n        else:\n            info['num'] = 401\n            send_flag = False\n        
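# report whether the requested file exists before any bytes are transferred\n        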
self.request.send(json.dumps(info).encode('utf-8'))\n        if send_flag:\n            self.request.recv(1024)\n            f = open(path, 'rb')\n            file_md5 = hashlib.md5()\n            for i in f:\n                self.request.send(i)\n                file_md5.update(i)\n            f.close()\n            self.request.send(file_md5.hexdigest().encode('utf-8'))\n\n    def cut(self):\n        info = {\n            'num': 502\n        }\n        path = self.disk_path + self.data['path'] + self.data['name']\n        print('Client deleting file at path', path)\n        if os.path.isfile(path):\n            os.remove(path)\n            info['num'] = 500\n        else:\n            info['num'] = 501\n        self.request.send(json.dumps(info).encode('utf-8'))\n\n    def mkdir(self):\n        info = {\n            'num': 602\n        }\n        new_path = self.disk_path + self.data['path'] + self.data['mkdir_path']\n        print('Client requested creation of path', new_path)\n        if os.path.exists(new_path):\n            info['num'] = 601\n            print('The directory to create already exists')\n        else:\n            info['num'] = 600\n            os.makedirs(new_path)\n            print('Created new directory', new_path)\n        self.request.send(json.dumps(info).encode('utf-8'))\n","sub_path":"server/core/server_ftp.py","file_name":"server_ftp.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"496337441","text":"from flask import Flask, request, jsonify\nfrom dbSetup import items\nfrom bson.objectid import ObjectId\nfrom pymongo import ReturnDocument\nfrom flask_cors import CORS\nfrom models import Item\n\napp = Flask(__name__)\n\n\nCORS(app)\n\n@app.route('/items')\ndef get_all_items():\n    \"\"\"Route to get all items\"\"\"\n\n    allItems = list(items.find())\n    for item in allItems:\n        # convert ObjectId from MongoDb to string\n        id_to_string(item)\n    \n    return jsonify(allItems)\n\n@app.route('/items/<id>')\ndef get_item(id):\n    \"\"\"Route to get item by id\"\"\"\n\n    item = items.find_one({'_id': ObjectId(id)})\n\n    # convert ObjectId from MongoDb to string\n    id_to_string(item)\n    \n    return item\n\n@app.route('/items', methods=['POST'])\ndef create_item():\n    \"\"\"Route to create an item\"\"\"\n    \n    data = request.get_json()\n\n    item = Item(\n        description = data.get('description'),\n        price = data.get('price'),\n        type = data.get('type'),\n        img = data.get('img'),\n        quantity = data.get('quantity'),\n        name = data.get('name')\n    )\n\n    # get image name\n    name = item.get_img_name()\n\n    # save image on server\n    base64Image = data.get('imgFile')\n    Item.save_img(base64Image, name)\n    \n    #save item in db\n    result = items.insert_one(item.__dict__)\n    item = items.find_one({'_id': result.inserted_id})\n    id_to_string(item)\n    return item\n\n@app.route('/items/<id>', methods=['DELETE'])\ndef delete_item(id):\n    \"\"\"Route to delete an item by id\"\"\"\n\n    items.delete_one({'_id': ObjectId(id)})\n\n    return \"Success\"\n\n@app.route('/items/<id>', methods=['PATCH'])\ndef update_item(id):\n    \"\"\"Route to update an item\"\"\"\n\n    data = request.get_json()\n\n    item = items.find_one_and_update({'_id': ObjectId(id)}, {'$set': data}, return_document=ReturnDocument.AFTER)\n\n    # convert ObjectId from MongoDb to string\n    id_to_string(item)\n    \n    return item\n\n\ndef id_to_string(item):\n    item['_id'] = str(item['_id'])\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"373501082","text":"'''\nCreated on Oct 7, 2012\n\n@author: erkki\n'''\nfrom p1.Perceptron import Perceptron\nfrom util.DataFetcher import DataFetcher\nfrom util.LoggerFetcher import LoggerFetcher\n\nclass Main(object):\n    \n    def __init__(self):\n        self.log = LoggerFetcher().fetchLogger(\"p1\", \"main\")\n    \n    def 
classifyDataSet(self, dataSetName, perceptron):\n        \n        self.log.info('Classifying dataset %s', dataSetName)\n        container = DataFetcher().fetchDataSet(dataSetName)\n        labels = container.getLabels()\n        samples = container.getDataVectors()\n        \n        errors = 0\n        for i, sample in enumerate(samples):\n            \n            cls = perceptron.classify(sample) \n            if cls != labels[i]:\n                errors += 1\n                self.log.info('Misclassified sample %s as %s', labels[i], cls)\n        \n        setSize = container.getDataSetSize()\n        \n        errorRate = errors * 1.0 / setSize * 100\n        self.log.info('Error rate while classifying was %s', errorRate) \n\n    def trainPerceptron(self, dataSetName, alpha, iterations):\n        container = DataFetcher().fetchDataSet(dataSetName)\n        perceptron = Perceptron(container, alpha, iterations)\n        perceptron.train()\n        \n        return perceptron\n    \n    def main(self): \n        dataSets = ['buffer_dataset', 'inverter_dataset']\n        \n        for dataSetName in dataSets: \n            perceptron = self.trainPerceptron(dataSetName, 0.1, 15)\n            self.classifyDataSet(dataSetName, perceptron)\n        \nif __name__ == '__main__':\n    Main().main()\n","sub_path":"neural/p1/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"10248167","text":"import os\nimport copy\nimport numpy as np\n\nclass Configurations:\n    mag_10sqdeg = {\n        'pyconfig': os.path.join(os.environ['BALROG_PYCONFIG'], 'r50_r90_coords.py'),\n        #'label': 'mag_10sqdeg',\n        'outdir': os.environ['BALROG_DEFAULT_OUT'],\n        'magimage':'/astro/u/esuchyta/git_repos/BalrogSetupBNL/wrappers/SingleJob/magfield.fits',\n\n        'compressed': True,\n        'clean': True,\n        'fullclean': True,\n\n        'ntot': 300000, \n        #'ntot': 1000, \n        'ngal': 1000,\n        \n        #'nmin': str(0.3),\n        #'nmax': str(6.0),\n        #'dn': str(0.1),\n        #'ntype': 'lin',\n        #'label': 'nobin',\n\n        'presex': True,\n        'fitstype': 'ldac',\n        'sexnnw': os.path.join(os.environ['DESDM_CONFIG_SVA1'], 'sex.nnw'),\n        'sexconv': os.path.join(os.environ['DESDM_CONFIG_SVA1'], 'sex.conv'),\n        'sexpath': '/direct/astro+u/esuchyta/svn_repos/sextractor-2.18.10/install/bin/sex',\n        'sexparam': '/direct/astro+u/esuchyta/git_repos/BalrogSetupBNL/suchyta_config/single_n.param',\n        'sexconfig': '/direct/astro+u/esuchyta/git_repos/BalrogSetupBNL/suchyta_config/r50_r90.config'\n    }\n\n    mag_desdm = {\n        'pyconfig': os.path.join(os.environ['BALROG_PYCONFIG'], 'mag_desdm.py'),\n        'outdir': os.environ['BALROG_DEFAULT_OUT'],\n\n        'compressed': True,\n        'clean': True,\n        'fullclean': True,\n\n        'ntot': 300000, \n        'ngal': 1000,\n\n        #'label': 'nomag_desdm',\n        #'magnification': 0.0, \n\n        'label': 'mag_desdm',\n        'magnification': 0.01, \n\n        'presex': True,\n        'fitstype': 'ldac',\n        'sexnnw': os.path.join(os.environ['DESDM_CONFIG_SVA1'], 'sex.nnw'),\n        'sexconv': os.path.join(os.environ['DESDM_CONFIG_SVA1'], 'sex.conv'),\n        'sexpath': '/direct/astro+u/esuchyta/svn_repos/sextractor-2.18.10/install/bin/sex',\n\n        'sexparam': '/direct/astro+u/esuchyta/git_repos/BalrogSetupBNL/DESDM_config/sva1/sex.param_diskonly',\n        'sexconfig': '/direct/astro+u/esuchyta/git_repos/BalrogSetupBNL/DESDM_config/sva1/sex.config'\n    }\n\n\n\nclass TileLists:\n    suchyta13 = ['DES0415-4831',\n                 'DES0419-4831',\n                 'DES0423-4831',\n                 'DES0427-4831',\n                 'DES0432-4831',\n                 'DES0436-4831',\n                 'DES0440-4831',\n                 'DES0445-4831',\n                 'DES0449-4831',\n                 'DES0453-4831',\n                 'DES0458-4831',\n                 'DES0502-4831',\n                 'DES0506-4831']\n\n    suchyta14 = ['DES0411-4748',\n                 'DES0415-4748',\n                 'DES0419-4748',\n                 'DES0423-4748',\n                 'DES0428-4748',\n                 'DES0432-4748',\n                 'DES0436-4748',\n                 'DES0440-4748',\n                 
'DES0445-4748',\n 'DES0449-4748',\n 'DES0453-4748',\n 'DES0457-4748',\n 'DES0502-4748',\n 'DES0506-4748']\n\n suchyta27 = np.append( np.array(suchyta13), np.array(suchyta14) )\n\n\n\nclass SheldonInfo:\n sva1_coadd = {\n 'release': 'sva1_coadd',\n 'filetype': 'coadd_image',\n 'runkey': 'coadd_run',\n }\n","sub_path":"wrappers/SingleJob/runconfigs.py","file_name":"runconfigs.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"196150255","text":"import detaset\r\nimport re\r\n\r\namino=detaset.amino()\r\nsep=detaset.sep_str()\r\n\r\ndef translate(read_seq):\r\n result=make_codon_set(read_seq)\r\n return result\r\n\r\ndef make_codon_set(read_seq):\r\n # start = read_seq.find(\"AUG\")\r\n start = re.finditer(r\"AUG\",read_seq)\r\n dic=[]\r\n for s in start:\r\n tmp=make_codon(read_seq,s.start())\r\n dic.append(tmp)\r\n return dic\r\n\r\ndef make_codon(read_seq,start):\r\n k=3\r\n result=[]\r\n pre_codon = read_seq[start::]\r\n result.append(str(start))\r\n for i in range(0, len(pre_codon), k):\r\n codon = pre_codon[i:i+k]\r\n if codon in amino[\"x\"]:\r\n result.append(str(start+i))\r\n return formatResult(result,read_seq)\r\n elif len(codon)<3 :\r\n result.append(str(start+i))\r\n return formatResult(result,read_seq)\r\n else:\r\n result.append(codon)\r\n return formatResult(result,read_seq)\r\n\r\ndef read_codon(codon_list):\r\n result =[]\r\n for i in range(0,len(codon_list)):\r\n aminosan=codon_list[i]\r\n aminosan_check = [k for k, v in amino.items() if codon_list[i] in v]\r\n if len(aminosan_check) != 0:\r\n aminosan = aminosan_check[0]\r\n result.append(aminosan)\r\n return result\r\n\r\ndef formatResult(result,read_seq):\r\n codon=result\r\n prot=read_codon(result)\r\n return {'codon':sep.join(codon),'protain':sep.join(prot),'read':read_seq}","sub_path":"translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"459596080","text":"import PIL.Image as im\r\nimport numpy as np\r\n\r\nimage = im.open('image.png')\r\nimage = np.array(image)\r\n\r\n\r\nfor i in range(10):\r\n for j in range(256):\r\n image[i][j] = [0, 0, 0, 0]\r\n\r\nsortie = im.fromarray(image)\r\nsortie.save('image_copie.png')","sub_path":"Exercice 6.py","file_name":"Exercice 6.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"200616513","text":"print(\"Loading Python IL Module\")\n\n# PILgraph is here because both the black and white and colour screens need it.\n\nfrom objects import *\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\nimport numpy\nfrom array import *\n\n# The following class is used to prepare sensordata for display on the graph and draw it to the screen.\nclass graphlist(object):\n\n\t# the following is constructor code to give each object a list suitable for storing all our graph data.\n\tdef __init__(self, sourcerange, graphcoords, graphspan, cycle = 0, colour = 0, width = 1):\n\t\tself.new = True\n\t\tself.cycle = cycle\n\t\tself.tock = timer()\n\t\tself.tock.logtime()\n\t\tself.glist = array('f', [])\n\t\tself.dlist = array('f', [])\n\t\tself.colour = colour\n\t\tself.auto = True\n\t\tself.width = width\n\t\tself.dotw = 6\n\t\tself.doth = 6\n\n\t\tself.datahigh = 0\n\t\tself.datalow = 0\n\t\tself.newrange = (self.datalow,self.datahigh)\n\n\t\t# 
collect data for translating sensor readings into pixel locations\n\t\tself.sourcerange = sourcerange\n\t\tself.low,self.high = self.sourcerange\n\n\t\t# collect data for where the graph should be drawn to screen.\n\t\tself.x, self.y = graphcoords\n\t\tself.spanx,self.spany = graphspan\n\n\t\tself.newx,self.newy = graphcoords\n\t\tself.newspanx,self.newspany = graphspan\n\n\t\tself.targetrange = ((self.y + self.spany), self.y)\n\n\t\t# seeds a list with the coordinates for 0 to give us a list that we can put our scaled graph values in\n\t\tfor i in range(self.spanx):\n\t\t\tself.glist.append(self.y + self.spany)\n\n\t\t# seeds a list with sourcerange zero so we can put our sensor readings into it.\n\t\tfor i in range(self.spanx):\n\t\t\tself.dlist.append(self.low)\n\n\n\t# the following function returns the graph list.\n\tdef grabglist(self):\n\t\treturn self.glist\n\t# the following function returns the data list.\n\tdef grabdlist(self):\n\t\treturn self.dlist\n\n\t# Returns the average of the current dataset\n\tdef get_average(self):\n\t\taverage = sum(self.buff) / len(self.buff)\n\t\treturn average\n\n\tdef get_high(self):\n\t\treturn max(self.buff)\n\n\tdef get_low(self):\n\t\treturn min(self.buff)\n\n\t# this function calculates the approximate time scale of the graph\n\tdef giveperiod(self):\n\t\tself.period = (self.spanx * self.cycle) / 60\n\n\t\treturn self.period\n\n\t# the following appends data to the list.\n\n\tdef update(self, data):\n\t\t# grabs a tuple to hold our values\n\t\tself.buff = self.grabdlist()\n\n\n\t\t# if the time elapsed has reached the set interval then collect data\n\t\tif self.tock.timelapsed() >= self.cycle:\n\n\t\t\t# we load new data from the caller\n\t\t\tself.cleandata = data\n\n\t\t\t#append it to our list of clean data\n\t\t\tself.buff.append(self.cleandata)\n\n\t\t\t#pop the oldest value off\n\t\t\t# may remove this\n\t\t\tself.buff.pop(0)\n\t\t\tself.tock.logtime()\n\n\n\n\t# the following pairs the list of values with coordinates on the X axis. 
The graph's starting X coordinate and the spacing between points supply the positions.\n\t# if the auto flag is set then the class will autoscale the graph so that the highest and lowest currently displayed values are presented.\n\tdef graphprep(self,datalist):\n\t\tself.linepoint = self.x\n\t\tself.jump = 1\n\t\tself.newlist = []\n\n\n\t\tself.datahigh = max(self.dlist)\n\t\tself.datalow = min(self.dlist)\n\t\tself.newrange = (self.datalow,self.datahigh)\n\n\t\tfor i in range(self.spanx):\n\t\t\tif self.auto == True:\n\t\t\t\tscaledata = numpy.interp(datalist[i],self.newrange,self.targetrange)#self.translate(datalist[i], self.newrange, self.targetrange)\n\t\t\telse:\n\t\t\t\tscaledata = self.translate(datalist[i], self.sourcerange, self.targetrange)\n\n\t\t\tself.newlist.append((self.linepoint,scaledata))\n\t\t\tself.linepoint = self.linepoint + self.jump\n\n\t\treturn self.newlist\n\n\t# the following function maps a value from the source range onto the target range\n\tdef translate(self,value,source,target):\n\t\t# Figure out how 'wide' each range is\n\n\t\tleftMax,leftMin = source\n\t\trightMin,rightMax = target\n\n\t\tleftSpan = leftMax - leftMin\n\t\trightSpan = rightMax - rightMin\n\n\t\t# Convert the left range into a 0-1 range (float)\n\t\tif leftSpan == 0:\n\t\t\treturn rightMin + rightSpan / 2\n\n\t\tvalueScaled = float(value - leftMin) / float(leftSpan)\n\n\t\t# Convert the 0-1 range into a value in the right range.\n\t\treturn rightMin + (valueScaled * rightSpan)\n\n\tdef render(self, draw, auto = True, dot = True):\n\n\t\tself.auto = configure.auto[0]\n\n\t\t#preps the list by adding the X coordinate to every sensor value\n\t\tcords = self.graphprep(self.buff)\n\n\t\t# draws the line graph\n\t\tdraw.line(cords,self.colour,self.width)\n\n\n\t\tif dot:\n\t\t\tx1 = cords[-1][0] - (self.dotw/2)\n\t\t\ty1 = cords[-1][1] - (self.doth/2)\n\t\t\tx2 = cords[-1][0] + (self.dotw/2)\n\t\t\ty2 = cords[-1][1] + (self.doth/2)\n\t\t\tdraw.ellipse([x1,y1,x2,y2],self.colour)\n","sub_path":"pilgraph.py","file_name":"pilgraph.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"563563501","text":"#!/usr/bin/env python\n\n# standard library modules, , ,\nimport unittest\nimport os\nimport subprocess\nfrom collections import namedtuple\n\n# version, , represent versions and specifications, internal\nfrom yotta.lib import version\n# settings, , load and save settings, internal\nfrom yotta.lib import settings\n# install, , install components, internal\nfrom yotta import install\n\n\nTest_Name = 'testing-dummy'\nTest_Deps_Name = \"autopulated/github-access-testing\"\nTest_Deps_Target = \"x86-osx,*\"\nTest_Username = 'yottatest'\nTest_Access_Token = 'c53aadbd89caefdcadb0d43d18ef863e1d9cbcf4'\n\ndef ensureGithubConfig():\n    # ensure we have authentication for the test github account\n    if not settings.getProperty('github', 'authtoken'):\n        settings.setProperty('github', 'authtoken', Test_Access_Token)\n\n\nclass TestGitHubAccess(unittest.TestCase):\n    def setUp(self):\n        ensureGithubConfig()\n    \n    def tearDown(self):\n        pass\n\n    def test_installDeps(self):\n        Args = namedtuple('Args', ['component', 'target', 'act_globally', 'install_linked', 'save', 'save_target'])\n        install.installComponent(Args(Test_Deps_Name, Test_Deps_Target, False, False, False, False))\n\n\nif __name__ == '__main__':\n    
unittest.main()\n\n\n","sub_path":"yotta/test/github_access.py","file_name":"github_access.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"96438016","text":"file = open('engmix.txt')\nword = input('Enter a word: ')\n\nend = 0\nfor line in file:\n if word == line.strip():\n print(word, 'is in the dictionary')\n end+=1\n break\n\nif end == 0:\n print(word, 'is not in the dictionary')","sub_path":"askWord.py","file_name":"askWord.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"402453238","text":"import pywt\r\nimport matplotlib.pyplot as plt\r\n\r\n\"\"\"\r\nmode = ['zero', 'constant', 'symmetric', 'reflect', 'periodic', 'smooth', 'periodization']\r\n\"\"\"\r\n\r\n\r\ndef waveletdec(signal, coef_type='d', wname='sym7', level=7, mode='symmetric'):\r\n N = len(signal)\r\n w = pywt.Wavelet(wname)\r\n a = signal\r\n ca = []\r\n cd = []\r\n for i in range(level):\r\n (a, d) = pywt.dwt(a, w, mode)\r\n ca.append(a)\r\n cd.append(d)\r\n rec_a = []\r\n rec_d = []\r\n for i, coeff in enumerate(ca):\r\n coeff_list = [coeff, None] + [None] * i\r\n rec_a.append(pywt.waverec(coeff_list, w)[0:N])\r\n for i, coeff in enumerate(cd):\r\n coeff_list = [None, coeff] + [None] * i\r\n rec_d.append(pywt.waverec(coeff_list, w)[0:N])\r\n if coef_type == 'd':\r\n return rec_d\r\n return rec_a\r\n\r\n\r\nif __name__ == \"__main__\":\r\n plt.rcParams['font.sans-serif'] = ['SimHei']\r\n plt.rcParams['axes.unicode_minus'] = False\r\n s = [0, 1, 2, 3, 4, 5, 6, 7, 8]\r\n d = waveletdec(s, 'd', 'sym3', 3)\r\n a = waveletdec(s, 'a', 'sym3', 3)\r\n plt.subplot(3, 1, 1)\r\n plt.plot(s)\r\n plt.title('data')\r\n plt.subplot(3, 1, 2)\r\n r = d[0] + d[1] + d[2] + a[2]\r\n plt.plot(r)\r\n plt.title('rec data')\r\n plt.subplot(3, 1, 3)\r\n plt.plot(s - r)\r\n plt.title('error')\r\n plt.show()\r\n","sub_path":"test3_wl.py","file_name":"test3_wl.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"406066733","text":"# Copyright 2015 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nfrom oslo_utils import excutils\nfrom sqlalchemy.orm import exc as db_exceptions\nfrom stevedore import driver as stevedore_driver\nimport tenacity\n\nfrom octavia.api.drivers import utils as provider_utils\nfrom octavia.common import base_taskflow\nfrom octavia.common import constants\nfrom octavia.controller.worker.v2.flows import flow_utils\nfrom octavia.controller.worker.v2 import taskflow_jobboard_driver as tsk_driver\nfrom octavia.db import api as db_apis\nfrom octavia.db import repositories as repo\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\nRETRY_ATTEMPTS = 15\nRETRY_INITIAL_DELAY = 1\nRETRY_BACKOFF = 1\nRETRY_MAX = 5\n\n\ndef _is_provisioning_status_pending_update(lb_obj):\n return not lb_obj.provisioning_status == constants.PENDING_UPDATE\n\n\nclass ControllerWorker(object):\n\n def __init__(self):\n\n self._amphora_repo = repo.AmphoraRepository()\n self._amphora_health_repo = repo.AmphoraHealthRepository()\n self._health_mon_repo = repo.HealthMonitorRepository()\n self._lb_repo = repo.LoadBalancerRepository()\n self._listener_repo = repo.ListenerRepository()\n self._member_repo = repo.MemberRepository()\n self._pool_repo = repo.PoolRepository()\n self._l7policy_repo = repo.L7PolicyRepository()\n self._l7rule_repo = repo.L7RuleRepository()\n self._flavor_repo = repo.FlavorRepository()\n self._az_repo = repo.AvailabilityZoneRepository()\n\n persistence = tsk_driver.MysqlPersistenceDriver()\n\n self.jobboard_driver = stevedore_driver.DriverManager(\n namespace='octavia.worker.jobboard_driver',\n name=CONF.task_flow.jobboard_backend_driver,\n invoke_args=(persistence,),\n invoke_on_load=True).driver\n\n @tenacity.retry(\n retry=(\n tenacity.retry_if_result(_is_provisioning_status_pending_update) |\n tenacity.retry_if_exception_type()),\n wait=tenacity.wait_incrementing(\n RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),\n stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))\n def _get_db_obj_until_pending_update(self, repo, id):\n\n return repo.get(db_apis.get_session(), id=id)\n\n @property\n def services_controller(self):\n return base_taskflow.TaskFlowServiceController(self.jobboard_driver)\n\n def create_amphora(self, availability_zone=None):\n \"\"\"Creates an Amphora.\n\n This is used to create spare amphora.\n\n :returns: uuid\n \"\"\"\n try:\n store = {constants.BUILD_TYPE_PRIORITY:\n constants.LB_CREATE_SPARES_POOL_PRIORITY,\n constants.FLAVOR: None,\n constants.AVAILABILITY_ZONE: None}\n if availability_zone:\n store[constants.AVAILABILITY_ZONE] = (\n self._az_repo.get_availability_zone_metadata_dict(\n db_apis.get_session(), availability_zone))\n job_id = self.services_controller.run_poster(\n flow_utils.get_create_amphora_flow,\n store=store, wait=True)\n\n return job_id\n except Exception as e:\n LOG.error('Failed to create an amphora due to: {}'.format(str(e)))\n\n def delete_amphora(self, amphora_id):\n \"\"\"Deletes an existing Amphora.\n\n :param amphora_id: ID of the amphora to delete\n :returns: None\n :raises AmphoraNotFound: The referenced Amphora was not found\n \"\"\"\n amphora = self._amphora_repo.get(db_apis.get_session(),\n id=amphora_id)\n store = {constants.AMPHORA: amphora.to_dict()}\n self.services_controller.run_poster(\n flow_utils.get_delete_amphora_flow,\n store=store)\n\n @tenacity.retry(\n retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),\n 
wait=tenacity.wait_incrementing(\n        RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),\n        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))\n    def create_health_monitor(self, health_monitor):\n        \"\"\"Creates a health monitor.\n\n        :param health_monitor: Provider health monitor dict\n        :returns: None\n        :raises NoResultFound: Unable to find the object\n        \"\"\"\n        db_health_monitor = self._health_mon_repo.get(\n            db_apis.get_session(),\n            id=health_monitor[constants.HEALTHMONITOR_ID])\n\n        pool = db_health_monitor.pool\n        pool.health_monitor = db_health_monitor\n        load_balancer = pool.load_balancer\n        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n            load_balancer).to_dict()\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                pool.listeners))\n\n        store = {constants.HEALTH_MON: health_monitor,\n                 constants.POOL_ID: pool.id,\n                 constants.LISTENERS: listeners_dicts,\n                 constants.LOADBALANCER_ID: load_balancer.id,\n                 constants.LOADBALANCER: provider_lb}\n        self.services_controller.run_poster(\n            flow_utils.get_create_health_monitor_flow,\n            store=store)\n\n    def delete_health_monitor(self, health_monitor):\n        \"\"\"Deletes a health monitor.\n\n        :param health_monitor: Provider health monitor dict\n        :returns: None\n        :raises HMNotFound: The referenced health monitor was not found\n        \"\"\"\n        db_health_monitor = self._health_mon_repo.get(\n            db_apis.get_session(),\n            id=health_monitor[constants.HEALTHMONITOR_ID])\n\n        pool = db_health_monitor.pool\n        load_balancer = pool.load_balancer\n        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n            load_balancer).to_dict()\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                pool.listeners))\n\n        store = {constants.HEALTH_MON: health_monitor,\n                 constants.POOL_ID: pool.id,\n                 constants.LISTENERS: listeners_dicts,\n                 constants.LOADBALANCER_ID: load_balancer.id,\n                 constants.LOADBALANCER: provider_lb,\n                 constants.PROJECT_ID: load_balancer.project_id}\n        self.services_controller.run_poster(\n            flow_utils.get_delete_health_monitor_flow,\n            store=store)\n\n    def update_health_monitor(self, original_health_monitor,\n                              health_monitor_updates):\n        \"\"\"Updates a health monitor.\n\n        :param original_health_monitor: Provider health monitor dict\n        :param health_monitor_updates: Dict containing updated health monitor\n        :returns: None\n        :raises HMNotFound: The referenced health monitor was not found\n        \"\"\"\n        try:\n            db_health_monitor = self._get_db_obj_until_pending_update(\n                self._health_mon_repo,\n                original_health_monitor[constants.HEALTHMONITOR_ID])\n        except tenacity.RetryError as e:\n            LOG.warning('Health monitor did not go into %s in 60 seconds. '\n                        'This is either due to an in-progress Octavia upgrade '\n                        'or an overloaded and failing database. 
Assuming '\n 'an upgrade is in progress and continuing.',\n constants.PENDING_UPDATE)\n db_health_monitor = e.last_attempt.result()\n\n pool = db_health_monitor.pool\n\n listeners_dicts = (\n provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n pool.listeners))\n\n load_balancer = pool.load_balancer\n provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n load_balancer).to_dict()\n\n store = {constants.HEALTH_MON: original_health_monitor,\n constants.POOL_ID: pool.id,\n constants.LISTENERS: listeners_dicts,\n constants.LOADBALANCER_ID: load_balancer.id,\n constants.LOADBALANCER: provider_lb,\n constants.UPDATE_DICT: health_monitor_updates}\n self.services_controller.run_poster(\n flow_utils.get_update_health_monitor_flow,\n store=store)\n\n @tenacity.retry(\n retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),\n wait=tenacity.wait_incrementing(\n RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),\n stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))\n def create_listener(self, listener):\n \"\"\"Creates a listener.\n\n :param listener: A listener provider dictionary.\n :returns: None\n :raises NoResultFound: Unable to find the object\n \"\"\"\n db_listener = self._listener_repo.get(\n db_apis.get_session(), id=listener[constants.LISTENER_ID])\n if not db_listener:\n LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '\n '60 seconds.', 'listener',\n listener[constants.LISTENER_ID])\n raise db_exceptions.NoResultFound\n\n load_balancer = db_listener.load_balancer\n listeners = load_balancer.listeners\n dict_listeners = []\n for li in listeners:\n dict_listeners.append(\n provider_utils.db_listener_to_provider_listener(li).to_dict())\n provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n load_balancer).to_dict()\n\n store = {constants.LISTENERS: dict_listeners,\n constants.LOADBALANCER: provider_lb,\n constants.LOADBALANCER_ID: load_balancer.id}\n\n self.services_controller.run_poster(\n flow_utils.get_create_listener_flow,\n store=store)\n\n def delete_listener(self, listener):\n \"\"\"Deletes a listener.\n\n :param listener: A listener provider dictionary to delete\n :returns: None\n :raises ListenerNotFound: The referenced listener was not found\n \"\"\"\n # TODO(johnsom) Remove once the provider data model includes\n # the project ID\n lb = self._lb_repo.get(db_apis.get_session(),\n id=listener[constants.LOADBALANCER_ID])\n store = {constants.LISTENER: listener,\n constants.LOADBALANCER_ID:\n listener[constants.LOADBALANCER_ID],\n constants.PROJECT_ID: lb.project_id}\n self.services_controller.run_poster(\n flow_utils.get_delete_listener_flow,\n store=store)\n\n def update_listener(self, listener, listener_updates):\n \"\"\"Updates a listener.\n\n :param listener: A listener provider dictionary to update\n :param listener_updates: Dict containing updated listener attributes\n :returns: None\n :raises ListenerNotFound: The referenced listener was not found\n \"\"\"\n db_lb = self._lb_repo.get(db_apis.get_session(),\n id=listener[constants.LOADBALANCER_ID])\n store = {constants.LISTENER: listener,\n constants.UPDATE_DICT: listener_updates,\n constants.LOADBALANCER_ID: db_lb.id,\n constants.LISTENERS: [listener]}\n self.services_controller.run_poster(\n flow_utils.get_update_listener_flow,\n store=store)\n\n @tenacity.retry(\n retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),\n wait=tenacity.wait_incrementing(\n RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),\n 
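# 15 attempts with waits of 1, 2, 3, 4 then 5s (the cap) give roughly\n        # 60 seconds total, matching the 'Retrying for up to 60 seconds' logs.\n        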
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))\n def create_load_balancer(self, loadbalancer, flavor=None,\n availability_zone=None):\n \"\"\"Creates a load balancer by allocating Amphorae.\n\n First tries to allocate an existing Amphora in READY state.\n If none are available it will attempt to build one specifically\n for this load balancer.\n\n :param loadbalancer: The dict of load balancer to create\n :returns: None\n :raises NoResultFound: Unable to find the object\n \"\"\"\n lb = self._lb_repo.get(db_apis.get_session(),\n id=loadbalancer[constants.LOADBALANCER_ID])\n if not lb:\n LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '\n '60 seconds.', 'load_balancer',\n loadbalancer[constants.LOADBALANCER_ID])\n raise db_exceptions.NoResultFound\n\n # TODO(johnsom) convert this to octavia_lib constant flavor\n # once octavia is transitioned to use octavia_lib\n store = {constants.LOADBALANCER_ID:\n loadbalancer[constants.LOADBALANCER_ID],\n constants.BUILD_TYPE_PRIORITY:\n constants.LB_CREATE_NORMAL_PRIORITY,\n constants.FLAVOR: flavor,\n constants.AVAILABILITY_ZONE: availability_zone}\n\n topology = lb.topology\n listeners_dicts = (\n provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n lb.listeners)\n )\n\n store[constants.UPDATE_DICT] = {\n constants.TOPOLOGY: topology\n }\n self.services_controller.run_poster(\n flow_utils.get_create_load_balancer_flow,\n topology, listeners=listeners_dicts,\n store=store)\n\n def delete_load_balancer(self, load_balancer, cascade=False):\n \"\"\"Deletes a load balancer by de-allocating Amphorae.\n\n :param load_balancer: Dict of the load balancer to delete\n :returns: None\n :raises LBNotFound: The referenced load balancer was not found\n \"\"\"\n db_lb = self._lb_repo.get(db_apis.get_session(),\n id=load_balancer[constants.LOADBALANCER_ID])\n store = {constants.LOADBALANCER: load_balancer,\n constants.SERVER_GROUP_ID: db_lb.server_group_id,\n constants.PROJECT_ID: db_lb.project_id}\n if cascade:\n store.update(flow_utils.get_delete_pools_store(db_lb))\n store.update(flow_utils.get_delete_listeners_store(db_lb))\n self.services_controller.run_poster(\n flow_utils.get_cascade_delete_load_balancer_flow,\n load_balancer, store=store)\n else:\n self.services_controller.run_poster(\n flow_utils.get_delete_load_balancer_flow,\n load_balancer, store=store)\n\n def update_load_balancer(self, original_load_balancer,\n load_balancer_updates):\n \"\"\"Updates a load balancer.\n\n :param original_load_balancer: Dict of the load balancer to update\n :param load_balancer_updates: Dict containing updated load balancer\n :returns: None\n :raises LBNotFound: The referenced load balancer was not found\n \"\"\"\n store = {constants.LOADBALANCER: original_load_balancer,\n constants.LOADBALANCER_ID:\n original_load_balancer[constants.LOADBALANCER_ID],\n constants.UPDATE_DICT: load_balancer_updates}\n\n self.services_controller.run_poster(\n flow_utils.get_update_load_balancer_flow,\n store=store)\n\n def create_member(self, member):\n \"\"\"Creates a pool member.\n\n :param member: A member provider dictionary to create\n :returns: None\n :raises NoSuitablePool: Unable to find the node pool\n \"\"\"\n pool = self._pool_repo.get(db_apis.get_session(),\n id=member[constants.POOL_ID])\n load_balancer = pool.load_balancer\n provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n load_balancer).to_dict()\n\n listeners_dicts = (\n provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n pool.listeners))\n\n store = {\n 
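# Baseline inputs for the create-member flow; availability-zone\n            # metadata is filled in below when the LB is pinned to a zone.\n            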
constants.MEMBER: member,\n            constants.LISTENERS: listeners_dicts,\n            constants.LOADBALANCER_ID: load_balancer.id,\n            constants.LOADBALANCER: provider_lb,\n            constants.POOL_ID: pool.id}\n        if load_balancer.availability_zone:\n            store[constants.AVAILABILITY_ZONE] = (\n                self._az_repo.get_availability_zone_metadata_dict(\n                    db_apis.get_session(), load_balancer.availability_zone))\n        else:\n            store[constants.AVAILABILITY_ZONE] = {}\n\n        self.services_controller.run_poster(\n            flow_utils.get_create_member_flow,\n            store=store)\n\n    def delete_member(self, member):\n        \"\"\"Deletes a pool member.\n\n        :param member: A member provider dictionary to delete\n        :returns: None\n        :raises MemberNotFound: The referenced member was not found\n        \"\"\"\n        pool = self._pool_repo.get(db_apis.get_session(),\n                                   id=member[constants.POOL_ID])\n\n        load_balancer = pool.load_balancer\n        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n            load_balancer).to_dict()\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                pool.listeners))\n\n        store = {\n            constants.MEMBER: member,\n            constants.LISTENERS: listeners_dicts,\n            constants.LOADBALANCER_ID: load_balancer.id,\n            constants.LOADBALANCER: provider_lb,\n            constants.POOL_ID: pool.id,\n            constants.PROJECT_ID: load_balancer.project_id}\n        if load_balancer.availability_zone:\n            store[constants.AVAILABILITY_ZONE] = (\n                self._az_repo.get_availability_zone_metadata_dict(\n                    db_apis.get_session(), load_balancer.availability_zone))\n        else:\n            store[constants.AVAILABILITY_ZONE] = {}\n\n        self.services_controller.run_poster(\n            flow_utils.get_delete_member_flow,\n            store=store)\n\n    def batch_update_members(self, old_members, new_members,\n                             updated_members):\n        updated_members = [\n            (provider_utils.db_member_to_provider_member(\n                self._member_repo.get(db_apis.get_session(),\n                                      id=m.get(constants.ID))).to_dict(),\n             m)\n            for m in updated_members]\n        provider_old_members = [\n            provider_utils.db_member_to_provider_member(\n                self._member_repo.get(db_apis.get_session(),\n                                      id=m.get(constants.ID))).to_dict()\n            for m in old_members]\n        if old_members:\n            pool = self._pool_repo.get(db_apis.get_session(),\n                                       id=old_members[0][constants.POOL_ID])\n        elif new_members:\n            pool = self._pool_repo.get(db_apis.get_session(),\n                                       id=new_members[0][constants.POOL_ID])\n        else:\n            pool = self._pool_repo.get(\n                db_apis.get_session(),\n                id=updated_members[0][0][constants.POOL_ID])\n        load_balancer = pool.load_balancer\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                pool.listeners))\n        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n            load_balancer).to_dict()\n\n        store = {\n            constants.LISTENERS: listeners_dicts,\n            constants.LOADBALANCER_ID: load_balancer.id,\n            constants.LOADBALANCER: provider_lb,\n            constants.POOL_ID: pool.id,\n            constants.PROJECT_ID: load_balancer.project_id}\n        if load_balancer.availability_zone:\n            store[constants.AVAILABILITY_ZONE] = (\n                self._az_repo.get_availability_zone_metadata_dict(\n                    db_apis.get_session(), load_balancer.availability_zone))\n        else:\n            store[constants.AVAILABILITY_ZONE] = {}\n\n        self.services_controller.run_poster(\n            flow_utils.get_batch_update_members_flow,\n            provider_old_members, new_members, updated_members,\n            store=store)\n\n    def update_member(self, member, member_updates):\n        \"\"\"Updates a pool member.\n\n        :param member: A member provider dictionary to update\n        :param member_updates: Dict containing updated member attributes\n        :returns: None\n        :raises MemberNotFound: The referenced 
member was not found\n \"\"\"\n # TODO(ataraday) when other flows will use dicts - revisit this\n pool = self._pool_repo.get(db_apis.get_session(),\n id=member[constants.POOL_ID])\n load_balancer = pool.load_balancer\n provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n load_balancer).to_dict()\n\n listeners_dicts = (\n provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n pool.listeners))\n\n store = {\n constants.MEMBER: member,\n constants.LISTENERS: listeners_dicts,\n constants.LOADBALANCER_ID: load_balancer.id,\n constants.LOADBALANCER: provider_lb,\n constants.POOL_ID: pool.id,\n constants.UPDATE_DICT: member_updates}\n if load_balancer.availability_zone:\n store[constants.AVAILABILITY_ZONE] = (\n self._az_repo.get_availability_zone_metadata_dict(\n db_apis.get_session(), load_balancer.availability_zone))\n else:\n store[constants.AVAILABILITY_ZONE] = {}\n\n self.services_controller.run_poster(\n flow_utils.get_update_member_flow,\n store=store)\n\n @tenacity.retry(\n retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),\n wait=tenacity.wait_incrementing(\n RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),\n stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))\n def create_pool(self, pool):\n \"\"\"Creates a node pool.\n\n :param pool: Provider pool dict to create\n :returns: None\n :raises NoResultFound: Unable to find the object\n \"\"\"\n\n # TODO(ataraday) It seems we need to get db pool here anyway to get\n # proper listeners\n db_pool = self._pool_repo.get(db_apis.get_session(),\n id=pool[constants.POOL_ID])\n if not db_pool:\n LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '\n '60 seconds.', 'pool', pool[constants.POOL_ID])\n raise db_exceptions.NoResultFound\n\n load_balancer = db_pool.load_balancer\n provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n load_balancer).to_dict()\n\n listeners_dicts = (\n provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n db_pool.listeners))\n\n store = {constants.POOL_ID: pool[constants.POOL_ID],\n constants.LISTENERS: listeners_dicts,\n constants.LOADBALANCER_ID: load_balancer.id,\n constants.LOADBALANCER: provider_lb}\n self.services_controller.run_poster(\n flow_utils.get_create_pool_flow,\n store=store)\n\n def delete_pool(self, pool):\n \"\"\"Deletes a node pool.\n\n :param pool: Provider pool dict to delete\n :returns: None\n :raises PoolNotFound: The referenced pool was not found\n \"\"\"\n db_pool = self._pool_repo.get(db_apis.get_session(),\n id=pool[constants.POOL_ID])\n\n listeners_dicts = (\n provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n db_pool.listeners))\n load_balancer = db_pool.load_balancer\n\n provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n load_balancer).to_dict()\n\n store = {constants.POOL_ID: pool[constants.POOL_ID],\n constants.LISTENERS: listeners_dicts,\n constants.LOADBALANCER: provider_lb,\n constants.LOADBALANCER_ID: load_balancer.id,\n constants.PROJECT_ID: db_pool.project_id}\n self.services_controller.run_poster(\n flow_utils.get_delete_pool_flow,\n store=store)\n\n def update_pool(self, origin_pool, pool_updates):\n \"\"\"Updates a node pool.\n\n :param origin_pool: Provider pool dict to update\n :param pool_updates: Dict containing updated pool attributes\n :returns: None\n :raises PoolNotFound: The referenced pool was not found\n \"\"\"\n try:\n db_pool = self._get_db_obj_until_pending_update(\n self._pool_repo, origin_pool[constants.POOL_ID])\n except tenacity.RetryError as e:\n 
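# Retries exhausted; continue with the last value read rather than\n            # failing the whole update.\n            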
LOG.warning('Pool did not go into %s in 60 seconds. '\n                        'This is either due to an in-progress Octavia upgrade '\n                        'or an overloaded and failing database. Assuming '\n                        'an upgrade is in progress and continuing.',\n                        constants.PENDING_UPDATE)\n            db_pool = e.last_attempt.result()\n\n        load_balancer = db_pool.load_balancer\n        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n            load_balancer).to_dict()\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                db_pool.listeners))\n\n        store = {constants.POOL_ID: db_pool.id,\n                 constants.LISTENERS: listeners_dicts,\n                 constants.LOADBALANCER: provider_lb,\n                 constants.LOADBALANCER_ID: load_balancer.id,\n                 constants.UPDATE_DICT: pool_updates}\n        self.services_controller.run_poster(\n            flow_utils.get_update_pool_flow,\n            store=store)\n\n    def create_l7policy(self, l7policy):\n        \"\"\"Creates an L7 Policy.\n\n        :param l7policy: Provider dict of the l7policy to create\n        :returns: None\n        :raises NoResultFound: Unable to find the object\n        \"\"\"\n        db_listener = self._listener_repo.get(\n            db_apis.get_session(), id=l7policy[constants.LISTENER_ID])\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                [db_listener]))\n\n        store = {constants.L7POLICY: l7policy,\n                 constants.LISTENERS: listeners_dicts,\n                 constants.LOADBALANCER_ID: db_listener.load_balancer.id\n                 }\n        self.services_controller.run_poster(\n            flow_utils.get_create_l7policy_flow,\n            store=store)\n\n    def delete_l7policy(self, l7policy):\n        \"\"\"Deletes an L7 policy.\n\n        :param l7policy: Provider dict of the l7policy to delete\n        :returns: None\n        :raises L7PolicyNotFound: The referenced l7policy was not found\n        \"\"\"\n        db_listener = self._listener_repo.get(\n            db_apis.get_session(), id=l7policy[constants.LISTENER_ID])\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                [db_listener]))\n\n        store = {constants.L7POLICY: l7policy,\n                 constants.LISTENERS: listeners_dicts,\n                 constants.LOADBALANCER_ID: db_listener.load_balancer.id\n                 }\n        self.services_controller.run_poster(\n            flow_utils.get_delete_l7policy_flow,\n            store=store)\n\n    def update_l7policy(self, original_l7policy, l7policy_updates):\n        \"\"\"Updates an L7 policy.\n\n        :param original_l7policy: Provider dict of the l7policy to update\n        :param l7policy_updates: Dict containing updated l7policy attributes\n        :returns: None\n        :raises L7PolicyNotFound: The referenced l7policy was not found\n        \"\"\"\n        db_listener = self._listener_repo.get(\n            db_apis.get_session(), id=original_l7policy[constants.LISTENER_ID])\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                [db_listener]))\n\n        store = {constants.L7POLICY: original_l7policy,\n                 constants.LISTENERS: listeners_dicts,\n                 constants.LOADBALANCER_ID: db_listener.load_balancer.id,\n                 constants.UPDATE_DICT: l7policy_updates}\n        self.services_controller.run_poster(\n            flow_utils.get_update_l7policy_flow,\n            store=store)\n\n    def create_l7rule(self, l7rule):\n        \"\"\"Creates an L7 Rule.\n\n        :param l7rule: Provider dict l7rule\n        :returns: None\n        :raises NoResultFound: Unable to find the object\n        \"\"\"\n        db_l7policy = self._l7policy_repo.get(db_apis.get_session(),\n                                              id=l7rule[constants.L7POLICY_ID])\n\n        load_balancer = db_l7policy.listener.load_balancer\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                [db_l7policy.listener]))\n        l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy(\n            db_l7policy)\n\n        store = {constants.L7RULE: l7rule,\n                 
constants.L7POLICY: l7policy_dict.to_dict(),\n                 constants.L7POLICY_ID: db_l7policy.id,\n                 constants.LISTENERS: listeners_dicts,\n                 constants.LOADBALANCER_ID: load_balancer.id\n                 }\n        self.services_controller.run_poster(\n            flow_utils.get_create_l7rule_flow,\n            store=store)\n\n    def delete_l7rule(self, l7rule):\n        \"\"\"Deletes an L7 rule.\n\n        :param l7rule: Provider dict of the l7rule to delete\n        :returns: None\n        :raises L7RuleNotFound: The referenced l7rule was not found\n        \"\"\"\n        db_l7policy = self._l7policy_repo.get(db_apis.get_session(),\n                                              id=l7rule[constants.L7POLICY_ID])\n        l7policy = provider_utils.db_l7policy_to_provider_l7policy(db_l7policy)\n        load_balancer = db_l7policy.listener.load_balancer\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                [db_l7policy.listener]))\n\n        store = {constants.L7RULE: l7rule,\n                 constants.L7POLICY: l7policy.to_dict(),\n                 constants.LISTENERS: listeners_dicts,\n                 constants.L7POLICY_ID: db_l7policy.id,\n                 constants.LOADBALANCER_ID: load_balancer.id\n                 }\n        self.services_controller.run_poster(\n            flow_utils.get_delete_l7rule_flow,\n            store=store)\n\n    def update_l7rule(self, original_l7rule, l7rule_updates):\n        \"\"\"Updates an L7 rule.\n\n        :param original_l7rule: Origin dict of the l7rule to update\n        :param l7rule_updates: Dict containing updated l7rule attributes\n        :returns: None\n        :raises L7RuleNotFound: The referenced l7rule was not found\n        \"\"\"\n        db_l7policy = self._l7policy_repo.get(\n            db_apis.get_session(), id=original_l7rule[constants.L7POLICY_ID])\n        load_balancer = db_l7policy.listener.load_balancer\n\n        listeners_dicts = (\n            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(\n                [db_l7policy.listener]))\n        l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy(\n            db_l7policy)\n\n        store = {constants.L7RULE: original_l7rule,\n                 constants.L7POLICY: l7policy_dict.to_dict(),\n                 constants.LISTENERS: listeners_dicts,\n                 constants.L7POLICY_ID: db_l7policy.id,\n                 constants.LOADBALANCER_ID: load_balancer.id,\n                 constants.UPDATE_DICT: l7rule_updates}\n        self.services_controller.run_poster(\n            flow_utils.get_update_l7rule_flow,\n            store=store)\n\n    def _perform_amphora_failover(self, amp, priority):\n        \"\"\"Internal method to perform failover operations for an amphora.\n\n        :param amp: The amphora to failover\n        :param priority: The create priority\n        :returns: None\n        \"\"\"\n        stored_params = {constants.FAILED_AMPHORA: amp.to_dict(),\n                         constants.LOADBALANCER_ID: amp.load_balancer_id,\n                         constants.BUILD_TYPE_PRIORITY: priority, }\n\n        if amp.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP):\n            amp_role = 'master_or_backup'\n        elif amp.role == constants.ROLE_STANDALONE:\n            amp_role = 'standalone'\n        elif amp.role is None:\n            amp_role = 'spare'\n        else:\n            amp_role = 'undefined'\n\n        LOG.info(\"Perform failover for an amphora: %s\",\n                 {\"id\": amp.id,\n                  \"load_balancer_id\": amp.load_balancer_id,\n                  \"lb_network_ip\": amp.lb_network_ip,\n                  \"compute_id\": amp.compute_id,\n                  \"role\": amp_role})\n\n        if amp.status == constants.DELETED:\n            LOG.warning('Amphora %s is marked DELETED in the database but '\n                        'was submitted for failover. 
Deleting it from the '\n 'amphora health table to exclude it from health '\n 'checks and skipping the failover.', amp.id)\n self._amphora_health_repo.delete(db_apis.get_session(),\n amphora_id=amp.id)\n return\n\n if (CONF.house_keeping.spare_amphora_pool_size == 0) and (\n CONF.nova.enable_anti_affinity is False):\n LOG.warning(\"Failing over amphora with no spares pool may \"\n \"cause delays in failover times while a new \"\n \"amphora instance boots.\")\n\n # if we run with anti-affinity we need to set the server group\n # as well\n lb = self._amphora_repo.get_lb_for_amphora(\n db_apis.get_session(), amp.id)\n provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(\n lb).to_dict() if lb else lb\n if CONF.nova.enable_anti_affinity and lb:\n stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id\n if lb is not None and lb.flavor_id:\n stored_params[constants.FLAVOR] = (\n self._flavor_repo.get_flavor_metadata_dict(\n db_apis.get_session(), lb.flavor_id))\n else:\n stored_params[constants.FLAVOR] = {}\n if lb and lb.availability_zone:\n stored_params[constants.AVAILABILITY_ZONE] = (\n self._az_repo.get_availability_zone_metadata_dict(\n db_apis.get_session(), lb.availability_zone))\n else:\n stored_params[constants.AVAILABILITY_ZONE] = {}\n\n self.services_controller.run_poster(\n flow_utils.get_failover_flow,\n role=amp.role, load_balancer=provider_lb,\n store=stored_params, wait=True)\n\n LOG.info(\"Successfully completed the failover for an amphora: %s\",\n {\"id\": amp.id,\n \"load_balancer_id\": amp.load_balancer_id,\n \"lb_network_ip\": amp.lb_network_ip,\n \"compute_id\": amp.compute_id,\n \"role\": amp_role})\n\n def failover_amphora(self, amphora_id):\n \"\"\"Perform failover operations for an amphora.\n\n :param amphora_id: ID for amphora to failover\n :returns: None\n :raises AmphoraNotFound: The referenced amphora was not found\n \"\"\"\n try:\n amp = self._amphora_repo.get(db_apis.get_session(),\n id=amphora_id)\n if not amp:\n LOG.warning(\"Could not fetch Amphora %s from DB, ignoring \"\n \"failover request.\", amphora_id)\n return\n self._perform_amphora_failover(\n amp, constants.LB_CREATE_FAILOVER_PRIORITY)\n if amp.load_balancer_id:\n LOG.info(\"Mark ACTIVE in DB for load balancer id: %s\",\n amp.load_balancer_id)\n self._lb_repo.update(\n db_apis.get_session(), amp.load_balancer_id,\n provisioning_status=constants.ACTIVE)\n except Exception as e:\n try:\n self._lb_repo.update(\n db_apis.get_session(), amp.load_balancer_id,\n provisioning_status=constants.ERROR)\n except Exception:\n LOG.error(\"Unable to revert LB status to ERROR.\")\n with excutils.save_and_reraise_exception():\n LOG.error(\"Amphora %(id)s failover exception: %(exc)s\",\n {'id': amphora_id, 'exc': e})\n\n def failover_loadbalancer(self, load_balancer_id):\n \"\"\"Perform failover operations for a load balancer.\n\n :param load_balancer_id: ID for load balancer to failover\n :returns: None\n :raises LBNotFound: The referenced load balancer was not found\n \"\"\"\n\n # Note: This expects that the load balancer is already in\n # provisioning_status=PENDING_UPDATE state\n try:\n lb = self._lb_repo.get(db_apis.get_session(),\n id=load_balancer_id)\n\n # Exclude amphora already deleted\n amps = [a for a in lb.amphorae if a.status != constants.DELETED]\n for amp in amps:\n # failover amphora in backup role\n # Note: this amp may not currently be the backup\n # TODO(johnsom) Change this to query the amp state\n # once the amp API supports it.\n if amp.role == constants.ROLE_BACKUP:\n 
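# Fail over the BACKUP amphora first so the MASTER amphora keeps\n                    # serving traffic while its peer is rebuilt.\n                    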
self._perform_amphora_failover(\n                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)\n\n            for amp in amps:\n                # failover everything else\n                if amp.role != constants.ROLE_BACKUP:\n                    self._perform_amphora_failover(\n                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)\n\n            self._lb_repo.update(\n                db_apis.get_session(), load_balancer_id,\n                provisioning_status=constants.ACTIVE)\n\n        except Exception as e:\n            with excutils.save_and_reraise_exception():\n                LOG.error(\"LB %(lbid)s failover exception: %(exc)s\",\n                          {'lbid': load_balancer_id, 'exc': e})\n                self._lb_repo.update(\n                    db_apis.get_session(), load_balancer_id,\n                    provisioning_status=constants.ERROR)\n\n    def amphora_cert_rotation(self, amphora_id):\n        \"\"\"Perform cert rotation for an amphora.\n\n        :param amphora_id: ID for amphora to rotate\n        :returns: None\n        :raises AmphoraNotFound: The referenced amphora was not found\n        \"\"\"\n\n        amp = self._amphora_repo.get(db_apis.get_session(),\n                                     id=amphora_id)\n        LOG.info(\"Start amphora cert rotation, amphora's id is: %s\", amp.id)\n\n        store = {constants.AMPHORA: amp.to_dict(),\n                 constants.AMPHORA_ID: amphora_id}\n\n        self.services_controller.run_poster(\n            flow_utils.cert_rotate_amphora_flow,\n            store=store)\n\n    def update_amphora_agent_config(self, amphora_id):\n        \"\"\"Update the amphora agent configuration.\n\n        Note: This will update the amphora agent configuration file and\n        update the running configuration for mutatable configuration\n        items.\n\n        :param amphora_id: ID of the amphora to update.\n        :returns: None\n        \"\"\"\n        LOG.info(\"Start amphora agent configuration update, amphora's id \"\n                 \"is: %s\", amphora_id)\n        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)\n        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),\n                                                   amphora_id)\n        flavor = {}\n        if lb.flavor_id:\n            flavor = self._flavor_repo.get_flavor_metadata_dict(\n                db_apis.get_session(), lb.flavor_id)\n\n        store = {constants.AMPHORA: amp.to_dict(),\n                 constants.FLAVOR: flavor}\n\n        self.services_controller.run_poster(\n            flow_utils.update_amphora_config_flow,\n            store=store)\n","sub_path":"octavia/controller/worker/v2/controller_worker.py","file_name":"controller_worker.py","file_ext":"py","file_size_in_byte":41022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"133569048","text":"import os\nimport sys\nimport numpy\nsys.path.append(\"/tank/georgioutk/cliffordConvolutionRegAngGrads/\")\nimport tensorflow as tf\nimport cliffordConvolution as cc\nimport time\nimport pickle\nimport scipy.interpolate\nfrom matplotlib import pyplot as plt\n\ndef plot_field(field):\n\tfig, axes = plt.subplots(nrows=field.shape[0], ncols=field.shape[1])\n\tfor i in range(field.shape[0]):\n\t\tfor j in range(field.shape[1]):\n\t\t\taxes[i,j].quiver(0, 0, field[i,j,0], -field[i,j,1], angles='xy', scale_units='xy', scale=1)\n\t\t\taxes[i,j].set_xlim(-1.5, 1.5)\n\t\t\taxes[i,j].set_ylim(-1.5, 1.5)\n\t\t\taxes[i,j].set_xticks([])\n\t\t\taxes[i,j].set_yticks([])\n\tplt.subplots_adjust(wspace=0, hspace=0)\n\t# plt.show()\n\nmnist = tf.keras.datasets.mnist\n(x_train, y_train),(x_test, y_test) = mnist.load_data()\nx_train, x_test = numpy.expand_dims((x_train / 255.0).astype(numpy.float32), -1), numpy.expand_dims((x_test / 255.0).astype(numpy.float32), -1)\ny_train, y_test = y_train.astype(numpy.int32), y_test.astype(numpy.int32)\n\ncifar = tf.keras.datasets.cifar10\n(x_train, y_train),(x_test, y_test) = cifar.load_data()\nx_train, x_test = (x_train / 255.0).astype(numpy.float32), (x_test / 
255.0).astype(numpy.float32)\ny_train, y_test = y_train.astype(numpy.int32), y_test.astype(numpy.int32)\n\ng_train = numpy.gradient(x_train, axis=[1,2])\ng_test = numpy.gradient(x_test, axis=[1,2])\n\ngrads0 = tf.placeholder(tf.float32, [None, None, None, None])\ngrads1 = tf.placeholder(tf.float32, [None, None, None, None])\n# grads0 = tf.placeholder(tf.float32, [60000,28,28,1])\n# grads1 = tf.placeholder(tf.float32, [60000,28,28,1])\ngrads = tf.concat([grads0, grads1], axis=-1)\navgrads = tf.layers.average_pooling2d(grads, [3,3], [1,1], padding='SAME')\n# rotSecond = cc.transformations.rotateVectorField(avgrads, angle, irelevantAxisFirst=True)  # disabled: 'angle' is never defined and rotSecond is unused\nsepGrads = tf.split(avgrads, 2, -1)\n\nsess = tf.Session()\n# the masks and angle maps below depend on the averaged gradient components\nag_train = sess.run(sepGrads, feed_dict={grads0: g_train[0], grads1:g_train[1]})\nag_test = sess.run(sepGrads, feed_dict={grads0: g_test[0], grads1:g_test[1]})\n\ncg_train = sess.run(grads, feed_dict={grads0: g_train[0], grads1:g_train[1]})\ncg_test = sess.run(grads, feed_dict={grads0: g_test[0], grads1:g_test[1]})\n\nmask_train = numpy.sqrt(ag_train[0]**2 + ag_train[1]**2) < 1e-2\nmask_test = numpy.sqrt(ag_test[0]**2 + ag_test[1]**2) < 1e-2\n\nmask_train[:,:,0,:] = False\nmask_train[:,:,-1,:] = False\nmask_train[:,0,:,:] = False\nmask_train[:,-1,:,:] = False\nmask_test[:,:,0,:] = False\nmask_test[:,:,-1,:] = False\nmask_test[:,0,:,:] = False\nmask_test[:,-1,:,:] = False\n# ag_trainV = numpy.concatenate(ag_train, axis=-1)\n\nag_train[0][mask_train] = numpy.nan\nag_train[1][mask_train] = numpy.nan\nag_test[0][mask_test] = numpy.nan\nag_test[1][mask_test] = numpy.nan\n\nag_trainWithMask = [numpy.ma.masked_invalid(ag_train[0]), numpy.ma.masked_invalid(ag_train[1])]\nag_testWithMask = [numpy.ma.masked_invalid(ag_test[0]), numpy.ma.masked_invalid(ag_test[1])]\n\nx = numpy.arange(0, ag_train[0].shape[2])\ny = numpy.arange(0, ag_train[0].shape[1])\n\nxx, yy = numpy.meshgrid(x, y)\n\nag_trainInterpolated = [numpy.zeros(shape=ag_train[0].shape), numpy.zeros(shape=ag_train[1].shape)]\nfor index in range(ag_trainWithMask[0].shape[0]):\n\texample = ag_trainWithMask[0][index,:,:,0]\n\t#get only the valid values\n\tx1 = xx[~example.mask]\n\ty1 = yy[~example.mask]\n\tnewarr = example[~example.mask]\n\tGD1 = scipy.interpolate.griddata((x1, y1), newarr.ravel(), (xx, yy), method='cubic')\n\tag_trainInterpolated[0][index,:,:,0] = GD1\n\nfor index in range(ag_trainWithMask[1].shape[0]):\n\texample = ag_trainWithMask[1][index,:,:,0]\n\t#get only the valid values\n\tx1 = xx[~example.mask]\n\ty1 = yy[~example.mask]\n\tnewarr = example[~example.mask]\n\tGD1 = scipy.interpolate.griddata((x1, y1), newarr.ravel(), (xx, yy), method='cubic')\n\tag_trainInterpolated[1][index,:,:,0] = GD1\n\nag_testInterpolated = [numpy.zeros(shape=ag_test[0].shape), numpy.zeros(shape=ag_test[1].shape)]\nfor index in range(ag_testWithMask[0].shape[0]):\n\texample = ag_testWithMask[0][index,:,:,0]\n\t#get only the valid values\n\tx1 = xx[~example.mask]\n\ty1 = yy[~example.mask]\n\tnewarr = example[~example.mask]\n\tGD1 = scipy.interpolate.griddata((x1, y1), newarr.ravel(), (xx, yy), method='cubic')\n\tag_testInterpolated[0][index,:,:,0] = GD1\n\nfor index in range(ag_testWithMask[1].shape[0]):\n\texample = ag_testWithMask[1][index,:,:,0]\n\t#get only the valid values\n\tx1 = xx[~example.mask]\n\ty1 = yy[~example.mask]\n\tnewarr = example[~example.mask]\n\tGD1 = scipy.interpolate.griddata((x1, y1), newarr.ravel(), (xx, yy), method='cubic')\n\tag_testInterpolated[1][index,:,:,0] = GD1\n\na_train = numpy.arctan2(ag_train[1], 
ag_train[0])\na_trainInterpolated = numpy.arctan2(ag_trainInterpolated[1], ag_trainInterpolated[0])\n\na_test = numpy.arctan2(ag_test[1], ag_test[0])\na_testInterpolated = numpy.arctan2(ag_testInterpolated[1], ag_testInterpolated[0])\n\n\n\n\nmagn = tf.placeholder(tf.float32, [60000,28,28,1])\nangs = tf.placeholder(tf.float32, [60000,28,28,1])\nmagn2 = tf.placeholder(tf.float32, [10000,28,28,1])\nangs2 = tf.placeholder(tf.float32, [10000,28,28,1])\nnormMagn = cc.layers.normalizeVectorField(magn, 3, 3)\nvField = cc.transformations.changeToCartesian(normMagn, angs)\nnormMagn2 = cc.layers.normalizeVectorField(magn2, 3, 3)\nvField2 = cc.transformations.changeToCartesian(normMagn2, angs2)\n# magn = tf.placeholder(tf.float32, [60000,28,28,1])\n# angs = tf.placeholder(tf.float32, [60000,28,28,1])\n# vField = cc.transformations.changeToCartesian(magn, angs)\n\nv_trainInterpolated, v_testInterpolated = sess.run([vField, vField2], feed_dict={magn: x_train, angs: a_trainInterpolated, magn2: x_test, angs2: a_testInterpolated})\n# v_testInterpolated = sess.run(vField, feed_dict={magn: x_test, angs: a_testInterpolated})\nv_train = sess.run(vField, feed_dict={magn: x_train, angs: a_train})\n\npickle.dump(v_trainInterpolated, open(\"vMnistTrain3x3AP.pkl\",\"wb\"))\npickle.dump(v_testInterpolated, open(\"vMnistTest3x3AP.pkl\",\"wb\"))\n\npickle.dump(cg_train, open(\"gCifar10Train.pkl\",\"wb\"))\npickle.dump(cg_test, open(\"gCifar10Test.pkl\",\"wb\"))\n\n\nplt.imshow(a_train[0,:,:,0])\nplt.show()","sub_path":"tests/fillMissingValuesInImage.py","file_name":"fillMissingValuesInImage.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"157788875","text":"import decimal \n\ndef num2words(num):\n num = decimal.Decimal(num)\n decimal_part = num - int(num)\n num = int(num)\n\n if decimal_part:\n return num2words(num) + \" point \" + (\" \".join(num2words(i) for i in str(decimal_part)[2:]))\n\n under_20 = ['Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']\n tens = ['Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']\n above_100 = {100: 'Hundred', 1000: 'Thousand', 100000: 'Lakhs', 10000000: 'Crores'}\n\n if num < 20:\n return under_20[num]\n\n if num < 100:\n return tens[num // 10 - 2] + ('' if num % 10 == 0 else ' ' + under_20[num % 10])\n\n # find the appropriate pivot - 'Million' in 3,603,550, or 'Thousand' in 603,550\n pivot = max([key for key in above_100.keys() if key <= num])\n\n return num2words(num // pivot) + ' ' + above_100[pivot] + ('' if num % pivot==0 else ' ' + num2words(num % pivot))\n\n\nprint(num2words(decimal.Decimal(\"238484\")))","sub_path":"num2words.py","file_name":"num2words.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"36409224","text":"import json\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef Linkedin(linkedin_profile_link):\n result = []\n\n try:\n r = requests.get(url=linkedin_profile_link)\n soup = BeautifulSoup(r.text, \"html.parser\")\n data = json.loads(soup.find('script', type='application/ld+json').text)\n data1 = [element.text for element in soup.find_all(\"div\", class_=\"result-card__title experience-item__title\")]\n\n result.append('Lives in ' + data['address']['addressLocality'])\n if (data1):\n 
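# data1 holds the position titles scraped above, one entry per role.\n            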
result.append('Affiliations include')\n            for i in range(len(data1)):\n                result.append(str(i + 1) + ': ' + data1[i])\n    except Exception:\n        result.append('Could not retrieve anything')\n    print(*result)","sub_path":"CyberRATWeb/scrapers/linkedin_scrapper.py","file_name":"linkedin_scrapper.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"302887403","text":"from objetos.DecisionTreeFirst import DecisionTreeFirst\nfrom objetos.DecisionTreeExampleZero import DecisionTreeExampleZero\nfrom objetos.DecisionTreeIncidencias import DecisionTreeIncidencias\n\n\nclass DecisionTreeExample:\n\n    @staticmethod\n    def main(*args, **kwargs):\n        #DecisionTreeExample.example_zero()\n        #DecisionTreeExample.example_first()\n        DecisionTreeExample.example_second()\n\n    @staticmethod\n    def example_zero():\n        arbol = DecisionTreeExampleZero()\n        arbol.feature_for_setosa()\n        arbol.print_data_col()\n\n    @staticmethod\n    def example_first():\n        arbol = DecisionTreeFirst(max_depth=3)\n        arbol.imp_score_predict()\n        arbol.imp_predict()\n        arbol.abrir_dot()\n        #arbol.graficar_caracteristicas_importantes()\n        #arbol.graficar_clasificacion()\n\n    @staticmethod\n    def example_second():\n        arbol = DecisionTreeIncidencias('incidencia_entrenar.csv')\n        arbol.imp_score_predict()\n        arbol.imp_predict()\n        arbol.abrir_dot()\n\n\nif __name__ == '__main__':\n    DecisionTreeExample.main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"449065427","text":"# Copyright 2018 The KaiJIN Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport math\nimport torch\nimport numpy as np\nfrom .colorspace import *\nfrom imgaug import augmenters as iaa\n\n\ndef imnormalize(img, mean, std, to_rgb=True):\n\n  img = img.astype(np.float32)\n  if to_rgb:\n    img = bgr2rgb(img)\n  return (img - mean) / std\n\n\nclass Normalize(iaa.Augmenter):\n  def __init__(self, mean, std, to_rgb=True, name=None, deterministic=False, random_state=None):\n    super(Normalize, self).__init__(\n        name=name, deterministic=True, random_state=random_state)\n    self.mean = np.array(mean, dtype=np.float32)\n    self.std = np.array(std, dtype=np.float32)\n    self.to_rgb = to_rgb\n\n  def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n    raise NotImplementedError()\n\n  def _augment_images(self, images, random_state, parents, hooks):\n\n    results = []\n\n    for image in images:\n      image = imnormalize(image, self.mean, self.std, self.to_rgb)\n      results.append(image)\n\n    return results\n\n  def _augment_keypoints(self, keypoints_on_image, random_state, parents, hooks):\n\n    return keypoints_on_image\n\n  def _augment_polygons(self, polygons_on_image, random_state, parents, hooks):\n\n    return polygons_on_image\n\n  def get_parameters(self):\n    raise 
NotImplementedError()\n\n\nclass ToFloat(iaa.Augmenter):\n def __init__(self,\n name=None,\n deterministic=False,\n random_state=None):\n super(ToFloat, self).__init__(name=name,\n deterministic=deterministic,\n random_state=random_state)\n\n def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n return heatmaps\n\n def _augment_images(self, images, random_state, parents, hooks):\n results = []\n for image in images:\n m = image.astype('float32')\n results.append(m)\n return results\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n return keypoints_on_images\n\n def _augment_polygons(self, polygons_on_images, random_state, parents, hooks):\n return self._augment_polygons_as_keypoints(\n polygons_on_images, random_state, parents, hooks)\n \n def get_parameters(self):\n raise NotImplementedError()\n\n\nclass ToTensor(iaa.Augmenter):\n \"\"\"To pytorch tensor\n \"\"\"\n\n def __init__(self,\n image_scale=255,\n image_mean=None,\n image_std=None,\n heatmap_scale=None,\n name=None,\n deterministic=False,\n random_state=None):\n super(ToTensor, self).__init__(name=name,\n deterministic=deterministic,\n random_state=random_state)\n self.image_scale = image_scale\n self.heatmap_scale = heatmap_scale\n self.image_mean = image_mean\n self.image_std = image_std\n if self.image_mean is not None:\n self.image_mean = torch.as_tensor(\n self.image_mean, dtype=torch.float32, device='cpu')\n if self.image_std is not None:\n self.image_std = torch.as_tensor(\n self.image_std, dtype=torch.float32, device='cpu')\n\n def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n results = []\n for heatmap in heatmaps:\n m = torch.from_numpy(np.ascontiguousarray(heatmap.get_arr()))\n results.append(m)\n return results\n\n def _augment_images(self, images, random_state, parents, hooks):\n results = []\n for image in images:\n m = torch.from_numpy(np.ascontiguousarray(image.transpose((2, 0, 1))))\n m = m.type(torch.FloatTensor)\n if self.image_scale is not None:\n m = m.float().div(self.image_scale)\n if self.image_mean is not None:\n m.sub_(self.image_mean[:, None, None])\n if self.image_std is not None:\n m.div_(self.image_std[:, None, None])\n results.append(m)\n return results\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n return keypoints_on_images\n\n def _augment_polygons(self, polygons_on_images, random_state, parents, hooks):\n return self._augment_polygons_as_keypoints(\n polygons_on_images, random_state, parents, hooks)\n\n def get_parameters(self):\n raise NotImplementedError()\n\n\n# class Normalize(iaa.Augmenter):\n# \"\"\"Normalize\n# \"\"\"\n\n# def __init__(self,\n# mean,\n# std,\n# name=None,\n# deterministic=False,\n# random_state=None):\n# super(Normalize, self).__init__(name=name,\n# deterministic=deterministic,\n# random_state=random_state)\n# self.mean = np.array(mean)\n# self.std = np.array(std)\n\n# def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n# return heatmaps\n\n# def _augment_images(self, images, random_state, parents, hooks):\n# raise NotImplementedError()\n\n# def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n# return keypoints_on_images\n\n# def _augment_polygons(self, polygons_on_images, random_state, parents, hooks):\n# return self._augment_polygons_as_keypoints(\n# polygons_on_images, random_state, parents, hooks)\n\n# def get_parameters(self):\n# raise NotImplementedError()\n\n\nclass TruncatedStandardize(iaa.Augmenter):\n 
\"\"\"Implemented in TensorFlow\"\"\"\n\n def __init__(self,\n name=None,\n deterministic=False,\n random_state=None):\n super(TruncatedStandardize, self).__init__(name=name,\n deterministic=deterministic,\n random_state=random_state)\n\n def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n return heatmaps\n\n def _augment_images(self, images, random_state, parents, hooks):\n results = []\n for idx, image in enumerate(images):\n h, w, c = image.shape\n image = image.astype('float32')\n min_std = 1.0 / math.sqrt(float(h * w * c))\n adjust_std = max(np.std(image), min_std)\n image = (image - np.mean(image)) / adjust_std\n results.append(image)\n return results\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n return keypoints_on_images\n\n def _augment_polygons(self, polygons_on_images, random_state, parents, hooks):\n return self._augment_polygons_as_keypoints(\n polygons_on_images, random_state, parents, hooks)\n\n def get_parameters(self):\n raise NotImplementedError()\n","sub_path":"tw/transform/augmenter/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":7194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"145237926","text":"import numpy as np\nimport cv2\nimport time\n\ndef main():\n img = np.zeros([100,100])\n phi_scalar = np.full_like(img, img.shape[0]*img.shape[1]*2)\n f_scalar = np.zeros_like(img)\n n_max = img.shape[0]*2+(img.shape[1]-2)*2 \n p = np.zeros([n_max, 2])\n n = 0\n nb = np.full_like(img, False)\n \n i_in = np.zeros([img.shape[0], 2])\n j_in = np.zeros([img.shape[1], 2])\n cpos = np.asarray([(0,0), (img.shape[0]-1, img.shape[1]-1)])\n c = np.zeros_like(img)\n for i in range(len(c)):\n for j in range(len(c[0])):\n if ((i == cpos[0,0] or i == cpos[1,0]) and (cpos[0,1] <= j and j <= cpos[1,1])) \\\n or ((j == cpos[0,1] or j == cpos[1,1]) and (cpos[0,0] <= i and i <= cpos[1,0])):\n c[i,j] = 1\n \n for i in range(len(i_in)):\n ccc = [ii for ii, x in enumerate(c[:, i]) if x == 1]\n if len(ccc) > 1:\n i_in[i] = [ccc[0], ccc[-1]]\n else:\n i_in[i] = [-1, -1]\n for j in range(len(j_in)):\n ccc = [jj for jj, x in enumerate(c[j, :]) if x == 1]\n if len(ccc) > 1:\n j_in[j] = [ccc[0], ccc[-1]]\n else:\n j_in[j] = [-1, -1]\n\n # start = time.time()\n\n for i in range(len(phi_scalar)):\n for j in range(len(phi_scalar[0])):\n if c[i,j] == 1:\n phi_scalar[i,j] = 0\n get_euclidean(phi_scalar, (i,j), c)\n\n nb = phi_scalar <= 4\n print(nb)\n\n for i in range(len(phi_scalar)):\n for j in range(len(phi_scalar[0])):\n if i_in[i,0] < i and i < i_in[i,1] and j_in[j,0] < j and j < j_in[j,1]:\n phi_scalar[i,j] *= -1\n\n # elapsed_time = time.time() - start\n # print (\"elapsed_time:{0}\".format(elapsed_time) + \"[sec]\")\n\n \n\n print(phi_scalar)\n\ndef get_euclidean(phi, ij, c):\n for i in range(len(phi)):\n for j in range(len(phi)):\n # phi[i,j] = min(phi[i,j], e_table[abs(ij[0]-i),abs(ij[1]-j)])\n phi[i,j] = min(phi[i,j], (ij[0]-i)**2+(ij[1]-j)**2)\n\ndef get_euclidean_table(phi, ij, c, e_table):\n ## e_table's template\n # tmpl = np.asarray([x**2 for x in range(max(img.shape[0], img.shape[1]))])\n # tmpmap = np.zeros_like(img, dtype=int)\n # for i in range(len(tmpmap)):\n # for j in range(len(tmpmap[0])):\n # tmpmap[i,j] = tmpl[i]+tmpl[j]\n # print(tmpmap)\n\n for i in range(len(phi)):\n for j in range(len(phi)):\n phi[i,j] = min(phi[i,j], e_table[abs(ij[0]-i),abs(ij[1]-j)])\n \nif __name__ == \"__main__\":\n 
main()","sub_path":"segmentation/level_set_method.py","file_name":"level_set_method.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"221269142","text":"\n\n#calss header\nclass _LARCENY():\n\tdef __init__(self,): \n\t\tself.name = \"LARCENY\"\n\t\tself.definitions = [u'stealing, especially (in the US) the crime of taking something that does not belong to you, without illegally entering a building to do so']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_larceny.py","file_name":"_larceny.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"595431043","text":"import argparse\n\nimport sys, os\nsys.path.insert(0, './')\n\nimport lexer\nimport yacc\n\nparser = argparse.ArgumentParser(prog=sys.argv[0], usage=\"./bin/ekcc[.py] [-h|-?] [-v] [-O] [-emit-ast|-emit-llvm] -o \", add_help=False)\nparser.add_argument(\"-h\", action=\"help\", help=\"show this help message and exit\")\nparser.add_argument(\"-v\", action='store_true', help=\"print information for debugging\")\nparser.add_argument(\"-O\", action='store_true', help=\"enable optimization\")\nparser.add_argument(\"-emit-ast\", action='store_true', help=\"dump AST in a YAML format\")\nparser.add_argument(\"-emit-llvm\", action='store_true', help=\"output LLVM IR\")\nparser.add_argument(\"-o\", help=\"set output file path\", default=sys.stdout)\nargs, unknown = parser.parse_known_args()\n\nif len(unknown) != 1:\n raise ValueError(\"Usage: ./bin/ekcc.py \")\nelse:\n if args.emit_ast == True:\n with open(unknown[0], 'r') as input: \n content = input.read()\n result = yacc.parse(content)\n output_file_path = args.o\n if isinstance(args.o, str):\n with open(output_file_path, 'w') as output:\n output.write(result)\n else:\n args.o.write(result)","sub_path":"bin/ekcc.py","file_name":"ekcc.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"418126477","text":"# https://www.acmicpc.net/problem/10026\n\nimport sys\n\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.readline\n\nN = int(input())\ntable = [list(input()) for _ in range(N)]\nvisited = [[0] * N for _ in range(N)]\n\n\ndef dfs(node, is_RG=False):\n x, y = node\n if visited[x][y]:\n return\n visited[x][y] = 1\n\n if is_RG:\n if table[x][y] == \"B\":\n for dx, dy in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n nx, ny = x + dx, y + dy\n if 0 <= nx < N and 0 <= ny < N:\n if not visited[nx][ny] and table[nx][ny] == \"B\":\n dfs((nx, ny), is_RG)\n else:\n for dx, dy in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n nx, ny = x + dx, y + dy\n if 0 <= nx < N and 0 <= ny < N:\n if not visited[nx][ny] and table[nx][ny] != \"B\":\n dfs((nx, ny), is_RG)\n\n else:\n for dx, dy in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n nx, ny = x + dx, y + dy\n if 0 <= nx < N and 0 <= ny < N:\n if not visited[nx][ny] and table[nx][ny] == table[x][y]:\n dfs((nx, ny), is_RG)\n\n\nanswer1 = 0\nanswer2 = 0\nfor i in range(N):\n for j in range(N):\n if not visited[i][j]:\n answer1 += 1\n dfs((i, j))\nvisited = [[0] * N for _ in range(N)]\nfor i in range(N):\n for j in range(N):\n if not visited[i][j]:\n answer2 += 1\n dfs((i, j), True)\nprint(\"{} 
{}\".format(answer1, answer2))\n","sub_path":"BOJ/graph/10026.py","file_name":"10026.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"411708928","text":"import requests\n\n\nclass LolSettings:\n def __init__(self, summoner, region):\n self.summoner = summoner\n self.region = region\n self.headers = {'X-Riot-Token': ''}\n\n def start(self):\n url = f'https://la1.api.riotgames.com/lol/summoner/v4/summoners/by-name/{self.summoner}'\n response = requests.get(url, headers=self.headers)\n return response.json()\n\nclass Lol(LolSettings):\n def __init__(self, summoner, region):\n super().__init__(summoner, region)\n \n def greetings(self):\n summoner = self.start()\n name = summoner['name']\n lvl = summoner['summonerLevel']\n icon_id = summoner['profileIconId']\n\n greetings = f'Saludos invocador {name}, lvl {lvl}.'\n icon_url = f'https://ddragon.leagueoflegends.com/cdn/11.6.1/img/profileicon/{icon_id}.png'\n return {'greetings': greetings, 'icon_url': icon_url}\n \n def rank(self):\n summoner = self.start()\n summoner_id = summoner['id']\n name = summoner['name']\n url = f'https://la1.api.riotgames.com/lol/league/v4/entries/by-summoner/{summoner_id}'\n response = requests.get(url, headers=self.headers)\n print(response.json())\n return response.json()[0]","sub_path":"utils/lol.py","file_name":"lol.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"604240001","text":"def partition(lst):\n pivot = lst[0]\n\n for i in range(1, len(lst)):\n if lst[i] < pivot:\n lst.insert(0, lst.pop(i))\n\ndef main():\n list1 = [int(x) for x in input(\"Enter some values: \").split()]\n\n partition(list1)\n print(\"After the partition, the list is\", end = \" \")\n for num in list1:\n print(num, end = \" \")\n print()\n\nmain()","sub_path":"PythonProgramming/cp10/프로그래밍 연습문제(cp10)/10.28.py","file_name":"10.28.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"463242022","text":"from rest_framework.routers import SimpleRouter\nfrom health_workforce import views # import the views for routing on the api endpoints\n\nrouter = SimpleRouter()\nrouter.register(\n r'courses', views.StgInstitutionProgrammesViewSet,'course')\nrouter.register(\n r'training_types',views.StgInstitutionTypeViewSet,'training_type')\nrouter.register(\n r'institutions', views.StgTrainingInstitutionViewSet,'institution')\nrouter.register(r'cadres', views.StgHealthCadreViewSet,'carde')\nrouter.register(\n r'workforce',views.StgHealthWorkforceFactsViewSet,'workforce')\nurlpatterns = router.urls\n","sub_path":"health_workforce/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"505434506","text":"import sys\nimport collections\n\nCashier = collections.namedtuple(\"Cashier\", [\"m\", \"s\", \"p\"])\n\ntc = int(sys.stdin.readline())\n\n\ndef find_min_time(nr, nb, cs):\n l = 0\n r = int(10**18 * 2)\n while l < r:\n mt = (l + r) // 2\n mc = []\n css = []\n for c in cs:\n if c.p >= mt:\n continue\n css.append(c)\n for c in css:\n mc.append(min(c.m, (mt - c.p) // c.s))\n mc.sort()\n mc.reverse()\n if sum(mc[:nr]) >= nb:\n r = mt\n else:\n l = mt + 1\n return l\n\n\nfor tn in range(tc):\n r, b, c = map(int, sys.stdin.readline().split())\n cs = 
[]\n    for _ in range(c):\n        m, s, p = map(int, sys.stdin.readline().split())\n        cs.append(Cashier(m, s, p))\n    print(\"Case #%d: %d\" % (tn + 1, find_min_time(r, b, cs)))\n","sub_path":"src/2018/firstb/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"316499224","text":"\nfrom source.model import *\n\n\nuser = Bot_user.add(6,'admin','Bob','Bobovich')\nLiked_film_list.add(6,6,'Interstellar',1,1414281600,8.1111)\nExpected_film_list.add(7, 6, 'New film', 1, 1714281600, 8.456)\n\n\n\n\n","sub_path":"Bektimirov_Alim/workshop4/source/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"620170652","text":"\"\"\"\nYou are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse\norder and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.\n\nYou may assume the two numbers do not contain any leading zero, except the number 0 itself.\n\nExample:\n\nInput: (2 -> 4 -> 3) + (5 -> 6 -> 4)\nOutput: 7 -> 0 -> 8\nExplanation: 342 + 465 = 807.\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution(object):\n    def addTwoNumbers(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        dummyHead = ListNode(0)\n        p, q, curr, carry = l1, l2, dummyHead, 0\n        while p is not None or q is not None:\n            x = p.val if p is not None else 0\n            y = q.val if q is not None else 0\n            sum = carry + x + y\n            carry = int(sum / 10) # 0 when there is no carry, 1 when there is\n            curr.next = ListNode(sum % 10)\n            curr = curr.next # advance along the list\n            if p is not None:\n                p = p.next\n            if q is not None:\n                q = q.next\n        if carry > 0: # after the loop ends, handle any remaining carry\n            curr.next = ListNode(carry)\n        return dummyHead.next\n\n\ns = Solution()\nl1 = ListNode(2)\nl1.next = ListNode(4)\nl1.next.next = ListNode(3)\nl2 = ListNode(5)\nl2.next = ListNode(6)\nl2.next.next = ListNode(4)\nr = s.addTwoNumbers(l1, l2)\nprint([r.val, r.next.val, r.next.next.val])\n","sub_path":"_002_add_two_numbers.py","file_name":"_002_add_two_numbers.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"604799490","text":"import http\nimport os\n\nimport flask\nimport src.mongo.service\nimport src.sample_data\n\nPORT = os.environ.get(\"PORT\", 3001)\nENDPOINT_MASTER_DETAIL = \"/api/masterdetail\"\nENDPOINT_LIST = \"/api/list\"\nENDPOINT_GRID = \"/api/grid\"\n\napp = flask.Flask(__name__, static_folder=\"../build\")\n\n# List Endpoints\n@app.route(ENDPOINT_LIST)\ndef get_list():\n    return flask.jsonify(src.mongo.service.get())\n\n\n@app.route(ENDPOINT_LIST, methods=[\"POST\"])\ndef add_list_item():\n    json_response = flask.jsonify(src.mongo.service.create())\n    return flask.make_response(json_response, http.HTTPStatus.CREATED)\n\n\n@app.route(ENDPOINT_LIST + \"/<item_id>\", methods=[\"DELETE\"])\ndef delete_list_item(item_id):\n    try:\n        removed_item = flask.jsonify(src.mongo.service.delete(item_id))\n        return removed_item\n    except ValueError as ex:\n        err_response = flask.jsonify({\"error\": str(ex)})\n        return flask.make_response(err_response, http.HTTPStatus.NOT_FOUND)\n\n\n# MasterDetail Page Endpoint\n@app.route(ENDPOINT_MASTER_DETAIL)\ndef get_master_detail():\n    return 
flask.jsonify(src.sample_data.sample_orders)\n\n\n# Grid Page Endpoint\n@app.route(ENDPOINT_GRID)\ndef get_grid():\n return flask.jsonify(src.sample_data.sample_orders)\n\n\n# Catching all routes\n# This route is used to serve all the routes in the frontend application after deployment.\n@app.route(\"/\", defaults={\"path\": \"\"})\n@app.route(\"/\")\ndef catch_all(path):\n file_to_serve = \"index.html\"\n if path and os.path.exists(os.path.join(app.static_folder, path)):\n file_to_serve = path\n return flask.send_from_directory(app.static_folder, file_to_serve)\n\n\n# Error Handler\n@app.errorhandler(http.HTTPStatus.NOT_FOUND.value)\ndef page_not_found():\n json_response = flask.jsonify({\"error\": \"Page not found\"})\n return flask.make_response(json_response, http.HTTPStatus.NOT_FOUND)\n\n\nif __name__ == \"__main__\":\n app.run(port=PORT)\n","sub_path":"ReactFlaskWithCosmosMongo/backend/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"641155011","text":"import sys\nsys.path.append(\"../\") # referencia al directorio base\nfrom model import Modulo, Menu\nfrom modelo import Docente\nfrom controladores import ControladorDocente\nfrom i18n import msg\nimport util\nfrom contexto import *\nimport ZODB\nimport BTrees.OOBTree\nimport transaction, persistent\n\nclass ModuloDocente(Modulo):\n __controlador = ControladorDocente()\n\n def __init__(self):\n Modulo.__init__(self, msg('abm.docente.menu.titulo'))\n self.__menu_dict = None\n\n def listar(self):\n docentes = self.get_controlador().get_lista_objetos()\n print(msg('abm.docente.titulo.lista'))\n for doc in docentes:\n print(doc.__str__()) \n self.pausa()\n\n def registrar(self):\n print(msg('abm.docente.titulo.registrar'))\n obligatorio = True\n\n cedula = util.leer_cadena(msg('docente.ingrese.cedula'), obligatorio)\n nombre = str(util.leer_cadena(msg('docente.ingrese.nombre'), obligatorio))\n apellido = str(util.leer_cadena(msg('docente.ingrese.apellido'), obligatorio))\n fecha_nacimiento = str(util.leer_cadena(msg('docente.ingrese.fecha_nacimiento'), obligatorio))\n asignatura = str(util.leer_cadena(msg('docente.ingrese.asignatura'), obligatorio))\n telefono = str(util.leer_cadena(msg('docente.ingrese.telefono'), obligatorio))\n departamento = str(util.leer_cadena(msg('docente.ingrese.departamento'), obligatorio))\n \n docente = Docente(asignatura, departamento, telefono, cedula, nombre, apellido, fecha_nacimiento)\n\n try:\n self.get_controlador().crear(docente)\n print(msg(\"registro.creado\"))\n except Exception as e:\n print(e)\n self.pausa()\n\n def borrar(self):\n print(msg('abm.docente.titulo.borrar'))\n obligatorio = True\n cedula = util.leer_cadena(msg('docente.ingrese.cedula'), obligatorio)\n try:\n docente = self.get_controlador().buscar_codigo(cedula)\n if not docente:\n print(msg('docente.cedula.no.existe'), \":\", cedula)\n else:\n self.get_controlador().borrar(docente)\n print(msg('docente.borrado'))\n except Exception as e:\n print(e)\n self.pausa()\n\n def consultar_docente(self):\n obligatorio = True\n cedula = util.leer_cadena(msg('docente.ingrese.cedula'), obligatorio)\n \n try:\n if not util.es_numerico(cedula):\n raise Exception(\"La cedula debe ser numerica!\")\n \n docente = ControladorDocente().buscar_codigo(cedula)\n return docente\n except Exception as e:\n print(e)\n\n def ir_menu_principal(self):\n self.set_terminar_ejecucion(True)\n\n def get_controlador(self):\n return 
self.__controlador\n\n def get_menu_dict(self):\n #crear en caso de que aun no se haya creado\n if not self.__menu_dict:\n menu_listar = Menu(msg('abm.docente.listar'), self.listar)\n menu_registrar = Menu(msg('abm.docente.registrar'), self.registrar)\n #menu_borrar = Menu(msg('abm.docente.borrar'), self.borrar)\n menu_principal = Menu(msg('abm.ir.menu.principal'),self.ir_menu_principal)\n menus = {1: menu_listar, 2: menu_registrar, 3: menu_principal}\n self.__menu_dict = menus\n\n return self.__menu_dict\n\n\nif __name__ == \"__main__\":\n ma = ModuloDocente()\n ma.iniciar() \n","sub_path":"abm/abm_docente.py","file_name":"abm_docente.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"351576574","text":"# -*- coding: utf-8 -*-\n\nclass Rc4(object):\n \"\"\" Implementa a Cifra RC4.\n\n Attributes:\n KEY Chave padrão da Cifra\n \"\"\"\n KEY = \"rc4isawesome\"\n\n def __init__(self, key):\n self.KEY = key\n\n def rc4(self, msg):\n \"\"\" Retorna uma mensagem encriptada ou decriptada.\n\n Keyword arguments:\n msg -- mensagem a ser encriptada ou decriptada\n \"\"\"\n S, T = [],[]\n j = 0\n rc4_msg = []\n\n # Inicialização de S e do array temporário T\n for i in range(256):\n S.append(i)\n T.append(ord(self.KEY[i % len(self.KEY)]))\n\n # Permutação inicial de S\n for i in range(256):\n j = (j + S[i] + T[i]) % 256\n S[i], S[j] = S[j], S[i] # Swap\n\n # Stream Generation\n i = j = 0\n\n for byte in msg:\n i = (i + 1) % 256\n j = (j + S[i]) % 256\n S[i], S[j] = S[j], S[i] # Swap\n rc4_msg.append( chr(ord(byte) ^ S[(S[i] + S[j]) % 256]) )\n\n return \"\".join(rc4_msg)\n\n","sub_path":"algorithms/rc4.py","file_name":"rc4.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"416409338","text":"\"Run some basic tests against the docker-compose app\"\nimport logging\nfrom urllib.parse import urljoin\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nlogging.basicConfig(level=logging.DEBUG)\n\nBASE_URL = 'http://localhost:8000/'\n\ns = requests.Session()\n\n# uWSGI should return a 400 error for requests with a bad Host header\nr = s.get(urljoin(BASE_URL, ''), headers={'Host': 'badhost.com'})\nassert r.status_code == 400, r.status_code\n# uWSGI just returns an empty response. If the request makes it to Django\n# (which it shouldn't), this will be b'
<h1>Bad Request (400)</h1>
'.\nassert r.content == b'', r.content\n\n# Our project doesn't have a homepage URL\nr = s.get(urljoin(BASE_URL, ''))\nassert r.status_code == 404, r.status_code\n\n# We should still be able to get to the admin\nr = s.get(urljoin(BASE_URL, 'admin/'))\nassert r.status_code == 200, r.status_code\n\n# Which, in turn, should have some CSS files we can try to download\nsoup = BeautifulSoup(r.content, features=\"html.parser\")\nfor link_href in [l.get('href') for l in soup.find_all('link')]:\n # If static files fail to download, uWSGI must not be set up properly to\n # serve them.\n r = s.get(urljoin(BASE_URL, link_href))\n assert r.status_code == 200, \\\n 'r.status_code=%s, link_href=%s' % (r.status_code, link_href)\n # If there's no 'Expires' header, uWSGI probably didn't get built with\n # regexp support (likely due to a missing system package).\n assert 'Expires' in r.headers, \\\n 'r.headers=%s, link_href=%s' % (r.headers, link_href)\n","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"267793718","text":"import sys\nimport os\n\nwd = os.getcwd()\nprint('Reading new bins...')\nNew_bins_file = wd + '/New_bins/New_bins.csv'\nNew_bins = open(New_bins_file, 'r', encoding = \"utf-8\")\nNew_bins_dic = {}\nfor line in New_bins:\n line_list = list(line.split('\\t'))\n Solyc_new = line_list[0].replace('\"', '').split('.')\n Solyc_new_short = Solyc_new[0].replace('s','S')\n New_bins_dic[Solyc_new_short.replace('\"', '')] = line_list[-1].rstrip(\"\\n\").replace('\"', '')\n #print(Solyc_new)\nBinCode_BinName_file = wd + '/dic_files/BinCode_BinName.csv'\nBinCode_BinName = open(BinCode_BinName_file, 'r', encoding = \"utf-8\")\nBinCode_BinName_dic = {}\nfor line in BinCode_BinName:\n line_list = list(line.split('\\t'))\n BinCode = line_list[0].lstrip('\"').rstrip('\"')\n BinCode_BinName_dic[BinCode] = line_list[1].lstrip('\"').rstrip('\"')\n #print(BinCode + '\\t' + BinCode_BinName_dic[BinCode])\n\ninput_directory = wd + '/input'\nfor currentpath, folders, files in os.walk(input_directory):\n for file in files:\n mapping_file = input_directory + '/' + file\n mapping = open(mapping_file, 'r', encoding = \"utf-8\")\n mapping_file_name_path = mapping_file.split('/')\n mapping_file_name_extension = mapping_file_name_path[-1].split('.')\n mapping_file_name = mapping_file_name_extension[0]\n print(\"Edited mapping file... 
\" + mapping_file_name_path[-1] + '\\tSolyc\\t\\tOld_bin\\t\\tNew_bin')\n output_file = wd + '/output/' + mapping_file_name + \"_custom.txt\"\n output = open(output_file, 'w', encoding = 'utf-8')\n Solyc_short_set = set() \n for line in mapping:\n line_list = line.split('\\t')\n Solyc = line_list[2].replace('\"', '').replace('s','S').replace(\"'\",\"\").split('.')\n Bin = line_list[0].replace('\"', '').replace(\"'\",\"\").split('.')\n Bin_1st = Bin[0]\n Solyc_short = Solyc[0]\n set_list = list(Solyc_short_set)\n dot = '.'\n if Solyc_short in New_bins_dic.keys() and Bin_1st == '35' and Solyc_short not in set_list:\n \n New_bins = New_bins_dic[Solyc_short]\n New_bins_list = New_bins.split('|')\n counter = list(range(0,len(New_bins_list)))\n for i in counter:\n print('Removed not assigned\\t' + dot.join(Solyc) + '\\tfrom bin\\t' + dot.join(Bin) + '\\tand added to\\t' + New_bins_list[i])\n output.write(\"'\" + New_bins_list[i] + \"'\\t'\"\n + BinCode_BinName_dic[New_bins_list[i]] + \"'\\t\"\n + line_list[2] + \"\\t\"\n + line_list[3] + \"\\n\")\n \n elif Solyc_short in New_bins_dic.keys() and Bin_1st != '35' and Solyc_short not in set_list:\n New_bins = New_bins_dic[Solyc_short]\n New_bins_list = New_bins.split('|')\n counter = list(range(0,len(New_bins_list)))\n for i in counter:\n print('replaced\\t' + dot.join(Solyc) + '\\tfrom bin\\t' + dot.join(Bin) + '\\tto\\t' + New_bins_list[i])#print(New_bins_list)\n output.write(line)#(remember to remove that)\n output.write(\"'\" + New_bins_list[i] + \"'\\t'\"\n + BinCode_BinName_dic[New_bins_list[i]]+ \"'\\t\"\n + line_list[2] + \"\\t\"\n + line_list[3] + \"\\n\")\n elif Solyc_short not in New_bins_dic.keys() and Solyc_short not in set_list:\n output.write(line)\n if Solyc_short != '':\n Solyc_short_set.add(Solyc_short)\nprint('Done!')\n","sub_path":"MapMan_mapping_file_editor.py","file_name":"MapMan_mapping_file_editor.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"612305917","text":"# Authors: payam.kavousi@gmail.com\n\"\"\"\nThis module provides a two step SKlearn pipeline for preprocessing\n\"\"\"\n\nfrom feature_engine.encoding import OrdinalEncoder\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom user_similarity_model.config.core import config\n\npipe = Pipeline(\n [\n # ==== Categorical encoding\n # latest_interest_tag and latest_assessment_tag\n (\n \"Targetencoder\",\n OrdinalEncoder(\n encoding_method=\"ordered\",\n variables=config.model_config.categorical_vars,\n ),\n ),\n (\"scaler\", MinMaxScaler()),\n ]\n)\n","sub_path":"user_similarity_model/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"8337703","text":"import time\nimport datetime\nimport sys\n\nmonth = eval(time.strftime(\"%m\")[1])\ndays = [31 , 28 , 31 , 30 , 31 , 30 , 31 , 31 , 30 , 31 , 30 , 31]\ntry :\n dayCheck = int(eval(sys.argv[2]))\n monthCheck = int(eval(sys.argv[1]))\n yearCheck = int(eval(sys.argv[3]))\nexcept :\n print(\"Non-numerical day/month detected.\")\n sys.exit()\n\nif monthCheck < 1 or monthCheck > 12 :\n print(\"error : invalid month : {}\".format(monthCheck))\n sys.exit()\n\nprint(\"Counting time until : {}/{}/{}\".format(monthCheck , dayCheck , yearCheck))\n\n\ntimeStr = time.strftime(\"%d\")\nif timeStr[0] == '0' :\n day = eval(timeStr[1])\nelse :\n day = 
eval(timeStr)\n\ntimeStr = time.strftime(\"%m\")\nif timeStr[0] == '0' :\n month = eval(timeStr[1])\nelse :\n month = eval(timeStr[2])\n\nyear = eval(time.strftime(\"%Y\"))\n\ndayName = time.strftime(\"%A\")\nmonthName = time.strftime(\"%B\")\nprint(\"Today is : {}, {} {} , {}/{}/{}\".format(dayName , monthName , day , month , day , year))\n\ndaysToGo = 0\n\n\ndaysUntilCheck = 0\nfor i in range(monthCheck - 1) :\n daysUntilCheck = daysUntilCheck + days[i]\ndaysUntilCheck = daysUntilCheck + dayCheck\n\nif yearCheck > year :\n for i in range(yearCheck - year) :\n daysUntilCheck = daysUntilCheck + 365\n\ndate = datetime.datetime.now()\ntemp = time.strftime(\"%j\")\nif temp[0] == '0' and temp[1] == '0' :\n daysUntilToday = int(eval(temp[2]))\nelif temp[0] == '0' :\n daysUntilToday = int(eval(temp[1] + temp[2]))\nelse :\n daysUntilToday = int(eval(temp))\n\ndaysToGo = daysUntilCheck - daysUntilToday\n\nstring = \"{}\".format(date)\nmicroString = string.split('.')[1]\ntry :\n micros = eval(microString)\nexcept :\n #Very temporary workaround of an error thrown by eval\n #when the value begins with 0. Will implement a solution\n #at a later time.\n micros = 84722\n\nseconds = eval(\"{}\".format(date.second))\nminutes = eval(\"{}\".format(date.minute))\nhours = eval(\"{}\".format(date.hour))\nfor i in range(seconds) : \n micros = micros + 1000000\nfor i in range(minutes) :\n micros = micros + 60000000\nfor i in range(hours) :\n micros = micros + 3600000000\ntimeLeftInDay = 86400000000 - micros\nhoursUntil = 0\nmicrosUntil = 0\nsecondsUntil = 0\nminutesUntil = 0\nwhile timeLeftInDay > 0 :\n if timeLeftInDay >= 3600000000 :\n timeLeftInDay = timeLeftInDay - 3600000000\n hoursUntil = hoursUntil + 1\n elif timeLeftInDay >= 60000000 :\n timeLeftInDay = timeLeftInDay - 60000000\n minutesUntil = minutesUntil + 1\n elif timeLeftInDay >= 1000000 :\n timeLeftInDay = timeLeftInDay - 1000000\n secondsUntil = secondsUntil + 1\n else :\n microsUntil = timeLeftInDay\n timeLeftInDay = 0\n\nprint(\"Time until date : {} Days, {} Hours, {} Minutes, {} Seconds, {} Microseconds.\".format(daysToGo - 1, hoursUntil , minutesUntil , secondsUntil , microsUntil))\n\n","sub_path":"daysTilLisa.py","file_name":"daysTilLisa.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"356738304","text":"\"\"\"MLOps Library\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import Ridge\nimport joblib\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n\ndef load_model(model=\"model.joblib\"):\n \"\"\"Grabs model from disk\"\"\"\n\n clf = joblib.load(model)\n return clf\n\n\ndef data():\n df = pd.read_csv(\"htwtmlb.csv\")\n return df\n\n\ndef retrain(tsize=0.1, model_name=\"model.joblib\"):\n \"\"\"Retrains the model\n\n See this notebook: Baseball_Predictions_Export_Model.ipynb\n \"\"\"\n df = data()\n y = df[\"Height\"].values # Target\n y = y.reshape(-1, 1)\n X = df[\"Weight\"].values # Feature(s)\n X = X.reshape(-1, 1)\n scaler = StandardScaler()\n X_scaler = scaler.fit(X)\n X = X_scaler.transform(X)\n y_scaler = scaler.fit(y)\n y = y_scaler.transform(y)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=tsize, random_state=3\n )\n clf = Ridge()\n model = clf.fit(X_train, y_train)\n accuracy = 
model.score(X_test, y_test)\n logging.debug(f\"Model Accuracy: {accuracy}\")\n joblib.dump(model, model_name)\n return accuracy, model_name\n\n\ndef format_input(x):\n \"\"\"Takes int and converts to numpy array\"\"\"\n\n val = np.array(x)\n feature = val.reshape(-1, 1)\n return feature\n\n\ndef scale_input(val):\n \"\"\"Scales input to training feature values\"\"\"\n\n df = data()\n features = df[\"Weight\"].values\n features = features.reshape(-1, 1)\n input_scaler = StandardScaler().fit(features)\n scaled_input = input_scaler.transform(val)\n return scaled_input\n\n\ndef scale_target(target):\n \"\"\"Scales Target 'y' Value\"\"\"\n\n df = data()\n y = df[\"Height\"].values # Target\n y = y.reshape(-1, 1) # Reshape\n scaler = StandardScaler()\n y_scaler = scaler.fit(y)\n scaled_target = y_scaler.inverse_transform(target)\n return scaled_target\n\n\ndef height_human(float_inches):\n \"\"\"Takes float inches and converts to human height in ft/inches\"\"\"\n\n feet = int(round(float_inches / 12, 2)) # round down\n inches_left = round(float_inches - feet * 12)\n result = f\"{feet} foot, {inches_left} inches\"\n return result\n\n\ndef human_readable_payload(predict_value):\n \"\"\"Takes numpy array and returns back human readable dictionary\"\"\"\n\n height_inches = float(np.round(predict_value, 2))\n result = {\n \"height_inches\": height_inches,\n \"height_human_readable\": height_human(height_inches),\n }\n return result\n\n\ndef predict(weight):\n \"\"\"Takes weight and predicts height\"\"\"\n\n clf = load_model() # loadmodel\n np_array_weight = format_input(weight)\n scaled_input_result = scale_input(np_array_weight) # scale feature input\n scaled_height_prediction = clf.predict(scaled_input_result) # scaled prediction\n height_predict = scale_target(scaled_height_prediction)\n payload = human_readable_payload(height_predict)\n predict_log_data = {\n \"weight\": weight,\n \"scaled_input_result\": scaled_input_result,\n \"scaled_height_prediction\": scaled_height_prediction,\n \"height_predict\": height_predict,\n \"human_readable_payload\": payload,\n }\n logging.debug(f\"Prediction: {predict_log_data}\")\n return payload\n","sub_path":"mlib.py","file_name":"mlib.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"231405637","text":"import os\nfrom math import ceil\n\nfrom django.db import models\nimport solo.models\nfrom django.utils.text import get_valid_filename\n\nfrom django.conf import settings\nDISPENSER_CHOICES = [(i, i) for i in range(settings.PUMPS_NB)]\n\n\ndef _cut(value, low=None, high=None):\n if low:\n value = max(low, value)\n if high:\n value = min(high, value)\n return value\n\n\nclass Ingredient(models.Model):\n name = models.CharField(unique=True, max_length=50)\n alcohol_percentage = models.FloatField(\n help_text='Should be between 0 and 100'\n )\n density = models.FloatField(\n help_text='In grams per liter [%s]' % settings.UNIT_DENSITY,\n default=settings.UNIT_DENSITY_DEFAULT\n )\n added_separately = models.BooleanField(\n default=False,\n )\n\n def save(self, *args, **kwargs):\n self.alcohol_percentage = _cut(self.alcohol_percentage, low=0, high=100)\n self.density = _cut(self.density, low=0)\n super(Ingredient, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.name\n\n def dispensers(self, filter_out_empty):\n dispensers = self.dispenser_set.all()\n if filter_out_empty:\n dispensers = dispensers.filter(is_empty=False)\n return dispensers\n\n def 
is_available(self):\n return self.added_separately or self.dispensers(filter_out_empty=settings.EMPTY_DISPENSER_MAKES_MIX_NOT_AVAILABLE).exists()\n\n @staticmethod\n def available_ingredients(ingredients_in_dispensers=None, include_added_separately=False):\n if ingredients_in_dispensers is None:\n ingredients_in_dispensers = Dispenser.ingredients_in_dispensers(filter_out_empty=settings.EMPTY_DISPENSER_MAKES_MIX_NOT_AVAILABLE)\n ingredients = Ingredient.objects.filter(pk__in=ingredients_in_dispensers)\n if include_added_separately:\n return ingredients.union(Ingredient.objects.filter(added_separately=True))\n else:\n return ingredients\n\n @staticmethod\n def alcohols():\n return Ingredient.objects.exclude(alcohol_percentage=0)\n\n @staticmethod\n def available_alcohols(ingredients_in_dispensers=None):\n if ingredients_in_dispensers is None:\n ingredients_in_dispensers = Dispenser.ingredients_in_dispensers(filter_out_empty=settings.EMPTY_DISPENSER_MAKES_MIX_NOT_AVAILABLE)\n return Ingredient.alcohols().filter(id__in=ingredients_in_dispensers)\n\n\ndef mix_upload_to(instance, filename):\n new_filename = get_valid_filename(instance.name)\n if len(filename.split('.')) > 1: # keep extension\n new_filename += '.' + filename.split('.')[-1]\n return os.path.join(settings.UPLOAD_FOR_MIX, new_filename)\n\n\nclass Mix(models.Model):\n\n class Meta:\n verbose_name_plural = 'Mixes'\n\n updated_at = models.DateTimeField(auto_now=True)\n name = models.CharField(unique=True, max_length=50)\n ingredients = models.ManyToManyField(\n Ingredient,\n through='Dose',\n related_name='in_mixes',\n )\n likes = models.PositiveSmallIntegerField(default=0)\n count = models.PositiveSmallIntegerField(default=0)\n image = models.ImageField(\n max_length=200,\n height_field='image_height',\n width_field='image_width',\n upload_to=mix_upload_to,\n null=True,\n blank=True,\n )\n image_height = models.PositiveIntegerField(null=True)\n image_width = models.PositiveIntegerField(null=True)\n description = models.TextField(\n blank=True,\n )\n verified = models.BooleanField(\n default=False\n )\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n for dose in self.doses:\n dose.set_quantity_to_zero_if_not_required()\n super(Mix, self).save(*args, **kwargs)\n\n @property\n def doses(self):\n return Dose.objects.filter(mix=self)\n\n def ordered_doses(self):\n return self.doses.order_by('number')\n\n def real_ingredients(self):\n return self.ingredients.filter(added_separately=False)\n\n @property\n def alcohol_percentage(self):\n q_and_p = self.doses.values_list('quantity', 'ingredient__alcohol_percentage')\n if len(q_and_p) == 0:\n return 0\n try:\n percentage = sum(map(lambda qp: qp[0] * qp[1], q_and_p))/sum(map(lambda qp: qp[0], q_and_p))\n return ceil(10 * percentage) / 10\n except ZeroDivisionError:\n return 0\n\n @property\n def volume(self):\n return sum(self.doses.values_list('quantity', flat=True))\n\n @property\n def weight(self):\n q_and_d = self.doses.values_list('quantity', 'ingredient__density')\n return sum(map(lambda qd: qd[0] * settings.UNIT_CONVERSION_VOLUME_SI * qd[1], q_and_d))\n\n def is_available(self):\n return all(ingredient.is_available() for ingredient in self.ingredients.all())\n\n def calibrate_volume_to(self, desired_total):\n \"\"\"Look out you respect the correct units\"\"\"\n volume = self.volume\n for dose in self.doses:\n dose.quantity = dose.quantity * desired_total / volume\n dose.save()\n\n @staticmethod\n def filter_by_available(mixes=None):\n 
available_ingredients_in_dispenser = Ingredient.available_ingredients(include_added_separately=False)\n mixes = mixes if mixes is not None else Mix.objects.all()\n mixes_with_at_least_one_ingredient = mixes.filter(\n ingredients__in=available_ingredients_in_dispenser\n ).distinct()\n return filter(\n lambda mix: all(\n ingredient in available_ingredients_in_dispenser\n for ingredient in mix.real_ingredients()\n ),\n mixes_with_at_least_one_ingredient\n )\n\n @staticmethod\n def naive_available(mixes=None):\n mixes = mixes if mixes is not None else Mix.objects.all()\n available = []\n for mix in mixes:\n if mix.is_available():\n available.append(mix)\n return available\n\n\nclass Dose(models.Model):\n mix = models.ForeignKey(Mix, on_delete=models.CASCADE)\n ingredient = models.ForeignKey(Ingredient, on_delete=models.CASCADE)\n quantity = models.FloatField(\n help_text='In %s [%s]' % (settings.UNIT_VOLUME_VERBOSE, settings.UNIT_VOLUME)\n )\n number = models.PositiveSmallIntegerField(\n help_text='The number in which order the dose must be served'\n )\n\n @property\n def weight(self):\n return self.ingredient.density * (self.quantity * settings.UNIT_CONVERSION_VOLUME_SI)\n\n def __str__(self):\n if self.ingredient.added_separately:\n return str(self.ingredient)\n else:\n return '{} {} of {}'.format(self.quantity, settings.UNIT_VOLUME, self.ingredient)\n\n def save(self, *args, **kwargs):\n self.quantity = _cut(self.quantity, low=0)\n super(Dose, self).save(*args, **kwargs)\n\n def is_available(self):\n return self.ingredient.is_available() # if self.required else True\n\n def set_quantity_to_zero_if_not_required(self):\n if self.ingredient.added_separately:\n self.quantity = 0\n self.save()\n\n @property\n def required(self):\n return not self.ingredient.added_separately\n\n\nclass Dispenser(models.Model):\n updated_at = models.DateTimeField(auto_now=True)\n number = models.PositiveSmallIntegerField(\n unique=True,\n choices=DISPENSER_CHOICES\n )\n ingredient = models.ForeignKey(\n Ingredient,\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n limit_choices_to={'added_separately': False},\n )\n is_empty = models.BooleanField()\n\n def __str__(self):\n return 'Dispenser {} with {}'.format(self.number, self.ingredient)\n\n def save(self, *args, **kwargs):\n if not self.ingredient:\n self.is_empty = True\n super(Dispenser, self).save(*args, **kwargs)\n\n @staticmethod\n def ingredients_in_dispensers(filter_out_empty):\n dispensers = Dispenser.objects.all()\n if filter_out_empty:\n dispensers = dispensers.filter(is_empty=False)\n return dispensers.values_list('ingredient', flat=True)\n\n\nclass Order(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n mix = models.ForeignKey(\n Mix,\n on_delete=models.SET_NULL, # keep command in history even in mix is deleted\n null=True,\n blank=True,\n )\n status = models.PositiveSmallIntegerField(choices=settings.SERVING_STATES_CHOICES, default=0)\n accepted = models.BooleanField(default=False)\n\n def __str__(self):\n if self.mix:\n return 'Order of one {}'.format(self.mix)\n else:\n return 'Empty order'\n\n def status_verbose(self):\n return settings.SERVING_STATES_CHOICES[self.status][1]\n\n\nclass Configuration(solo.models.SingletonModel):\n updated_at = models.DateTimeField(auto_now=True)\n show_only_available_mixes = models.BooleanField(default=False)\n\n class Meta:\n verbose_name = \"Configuration\"\n\n def __str__(self):\n return 
'Configuration'\n","sub_path":"recipes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"42489749","text":"\n#ImportModules\nimport ShareYourSystem as SYS\n\n#Definition \nMyModeler=SYS.ModelerClass(\n\t\t**{\n\t\t\t'FolderingPathVariable':SYS.Modeler.LocalFolderPathStr,\n\t\t\t'HdformatingFileKeyStr':'Thing1.hdf',\n\t\t\t'ModelKeyStrsList':[\t\n\t\t\t\t'MyStr',\n\t\t\t\t'MyIntsList'\n\t\t\t]\n\t\t}\n\t).model(\n\t)\n\n#Build a structure with a database\nSYS.mapSet(\n\t\tMyModeler.ModeledHdfTable,\n\t\t[\n\t\t\t('row.__setitem__',{'#liarg':('MyStr',\"hello\")}),\n\t\t\t('row.append',{'#liarg':None}),\n\t\t\t('row.__setitem__',{'#liarg':('MyStr',\"bonjour\")}),\n\t\t\t('row.__setitem__',{'#liarg':('MyIntsList',[1])}),\n\t\t\t('row.append',{'#liarg':None}),\n\t\t\t#('row.__setitem__',{'#liarg':('MyStr',\"bonjour\")}), \n\t\t\t#('row.__setitem__',{'#liarg':('MyIntsList',[1,3])}), \n\t\t\t#THIS would bring an error because list has to be size=1\n\t\t\t#('row.append',{'#liarg':None}),\n\t\t\t('flush',{'#liarg':None})\n\t\t]\n)\n\n#Definition the AttestedStr\nprint('MyModeler is ')\nSYS._print(MyModeler)\n\n#view\nprint('hdf5 file is : \\n'+SYS._str(MyModeler.hdfview()))\n\n#close\nMyModeler.file(_ModeStr='c')\n\n\n","sub_path":"Pythonlogy/build/lib/ShareYourSystem/Standards/Modelers/Modeler/03_ExampleDoc.py","file_name":"03_ExampleDoc.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"99538711","text":"import sys, os\n\nextensions = []\n\ntemplates_path = ['_templates']\n\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\nproject = 'KEGGscape'\ncopyright = 'Kozo Nishida, Keiichiro Ono'\n\nversion = '0.8.2'\nrelease = '0.8.2'\n\nexclude_trees = ['_build']\n\npygments_style = 'sphinx'\n\nhtml_theme = 'default'\n\nhtml_static_path = ['_static']\n\nhtmlhelp_basename = 'KEGGscape'\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"538703666","text":"# Write a function that accepts two positive integers as function parameters & returns their least common multiple(LCM)\n# The LCM of two integers a and b is the smallest (non zero) positive integer that is divisible by both a and b.\n# For example, the LCM of 4 and 6 is 12, the LCM of 10 and 5 is 10.\n\ndef lcm(a, b):\n num = 1\n while True:\n if num % a == 0 and num % b ==0:\n lcmNum = num\n break\n num = num + 1\n return lcmNum\n\n\n","sub_path":"CSE1309x/leastCommonMultiple.py","file_name":"leastCommonMultiple.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"284094335","text":"#!/usr/bin/python3\n\nf = open('p096_sudoku.txt', 'r')\np = []\nw = []\n\nfor n, l in enumerate(f.readlines()):\n if n % 10 != 0:\n w.append( [ int(x) for x in l[:-1] ] )\n elif n > 0:\n p.append(w.copy())\n w = []\n\ndef free(sudoku, x, y):\n f = set( [ i for i in range(1, 10) ] )\n\n for i in range(0, 9):\n f.discard(sudoku[x][i])\n f.discard(sudoku[i][y])\n\n x2 = x//3\n y2 = y//3\n\n for i in range(0, 3):\n for j in range(0, 3):\n f.discard(sudoku[3*x2+i][3*y2+j])\n\n return f\n\n\ndef solve(sudoku, lvl):\n zeros = 0\n for i in range(0, 9):\n for j in range(0, 9):\n if sudoku[i][j] == 0:\n zeros += 1\n\n if 
zeros == 0:\n w = sudoku[0][0] * 100 + sudoku[0][1] * 10 + sudoku[0][2]\n print(w)\n return w\n\n for i in range(0, 9):\n for j in range(0, 9):\n if sudoku[i][j] == 0:\n q = free(sudoku, i, j)\n if len(q) == 0:\n return -1\n\n for f in q:\n sudoku[i][j] = f\n r = solve(sudoku, lvl+1)\n\n if r > 0:\n return r\n\n sudoku[i][j] = 0\n\n return -1\n\n return -1\n\nq = []\nfor n, i in enumerate(p):\n print(n+1, end=\" \")\n q.append(solve(i, 1))\n\nprint(sum(q))\n","sub_path":"96_su_doku.py","file_name":"96_su_doku.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"474392262","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\n\nrecord = []\n\nfor i in range(30):\n j = i+1\n GPIO.setup(j, GPIO.OUT)\n print(f'Testing chanel : {j} ')\n GPIO.output(j, True)\n time.sleep(2)\n component = input('Enter the component name:')\n record.append([j, component])\n GPIO.cleanup()\n\nprint(record)","sub_path":"chanel_test.py","file_name":"chanel_test.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"340369359","text":"from collections import deque\n\nresult = deque()\n\ndef enqueue(num):\n\n\n if len(result) == 0:\n result.append(num)\n\n elif len(result) == 1:\n if result[0] > num:\n result.append(result[0])\n result[0] = num\n else:\n result.append(num)\n\n else:\n for i in range(len(result)):\n if result[0+ i] < num < result[1+i]:\n result.insert(1+i, num)\n\n return result\n\nenqueue(1)\nenqueue(5)\nenqueue(2)\nenqueue(4)\nprint(enqueue(3))\n\n\n##########################################\n\ndef insert_sort(a):\n sd = deque()\n sd.append(a[0])\n\n for i in range(1, len(a)):\n pos = i\n for j in range(i-1, -1, -1):\n if sd[j] > a[i]:\n pos = j\n sd.insert(pos, a[i])\n\n return sd\n\n\n\n\n\n","sub_path":"Algorithm/190902/리스���_연습문제3.py","file_name":"리스트_연습문제3.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"593042151","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 24 14:10:49 2015\n\n@author: dan\n\"\"\"\n\nimport sys, os\nfrom copy import deepcopy\nsys.path.append(os.path.expanduser('~/git/kvivo_max/scripts/'))\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport sys, os\nfrom collections import defaultdict\nfrom catalytic_rates import rates\nfrom cobra.flux_analysis.parsimonious import optimize_minimal_flux\nfrom cobra.flux_analysis.single_deletion import single_gene_deletion\nfrom matplotlib_venn import venn3\nfrom cobra.io.sbml import create_cobra_model_from_sbml_file\nfrom cobra.manipulation import delete_model_genes\nfrom cobra.core.Gene import parse_gpr, eval_gpr\nfrom collections import Counter\nimport csv\nsys.path.append(os.path.expanduser('~/git/across-projects'))\nfrom plot_types import cdf\n \nR = rates()\ngc = R.gc[R.gc['growth mode']=='batch']\n#gc = R.gc\nremove = R.gc['comments'].dropna()\ngc = gc.drop(remove.index)\ngr = gc['growth rate [h-1]'][gc.index]\n\nSAmax = R.SAmax['max specific activity [umol/mg/min]']\nefficiency = R.SA.div(SAmax,axis='rows')[gc.index]\n\n# all expressed enzymes in batch growth in units of mg/gCDW\nall_enzymes = set(R.genes.keys())\nexpression = pd.DataFrame.from_csv('../data/protein_abundance[copies_fl].csv')\nexpression = 
R._convert_copies_fL_to_mmol_gCDW(expression)\n\n\ndef get_functional_group(group_name, extend_fname):\n\n j = 0 \n systematic_level = 3\n genes = []\n for row in csv.reader(open(extend_fname, 'r'), delimiter='\\t'):\n if len(row) < 3:\n continue\n if j != 0:\n if row[2] != '':\n break \n genes.append(row[systematic_level]) \n \n if row[2] == group_name:\n j = 1\n return genes\n \nribosomal_proteins = get_functional_group('Ribosome', '../data/ecoli_extend.csv')\n\nc = 'GLC_BATCH_mu=0.58_S'\nribosome_exp = expression.loc[ribosomal_proteins]\nribosome_exp = ribosome_exp.mean()[gc.index] # mmol/gCDW\naa_flux = gr * (0.55/110) * 1000 / 3600# mmol/gCDW/s\nf_per_ribo = aa_flux / ribosome_exp # aa/ribosome/s\n\nplt.figure()\nplt.scatter(gr,f_per_ribo)\nplt.xlabel('growth rate [h-1]')\nplt.ylabel('AA per ribosome [s-1]')\nplt.xlim(0)\nplt.ylim(0)\n\nplt.figure()\nplt.plot(gr,ribosome_exp, 'o')\nplt.xlabel('growth rate [h-1]')\nplt.ylabel('ribosomes [mmol/gCDW]')\nplt.xlim(0)\nplt.ylim(0)\n#enzymes = R._convert_mmol_gCDW_to_mg_gCDW(R.expression_data[gc.index])\n#enzymes.dropna(how='all', inplace=True)\n#\n#enzymes = enzymes[c].dropna()\n#knockouts = set(all_enzymes) - set(enzymes.index)\n#knockouts.remove('s0001')\n#\n##model = create_cobra_model_from_sbml_file('../data/iJO1366.xml')\n#model = R.model\n#essentiality = single_gene_deletion(model, knockouts)\n#essential = set([model.genes.get_by_id(k) for k,v in essentiality[0].iteritems() if v==0])\n#\n#flux = R.flux_data * 3600\n#flux = flux[c]\n#tmp = {}\n#for g in essential:\n# for r in g.reactions:\n# tmp[g.id] = g.name\n# try:\n# i = flux[r.id] / flux.median()\n## if i > 1e-2:\n## print g\n# except:\n# continue\n\n#\n#def support_flux(flux):\n# reactions = [R.rxns[r] for r in flux.index]\n# out = set()\n# for r in reactions:\n# for g in list(r.genes):\n# out.add(g.id)\n# return out\n#\n#x = enzymes[c].dropna()\n#expressed = set(x.index)\n#y = flux[c].dropna()\n#carry_flux = support_flux(y)\n#\n#fig = plt.figure()\n#ax = plt.axes()\n#venn3([all_enzymes, expressed, carry_flux], \n# ['enzymes\\n%s'%len(all_enzymes), 'expressed\\n%s'%len(expressed), 'support flux\\n%s'%len(carry_flux)], ax=ax)\n#\n#a = carry_flux - expressed\n#out = set()\n#i = 0\n#for g in a:\n# gene = R.model.genes.get_by_id(g)\n# for r in list(gene.reactions):\n# if 'or' in r.gene_reaction_rule:\n# tree, clean = parse_gpr(r.gene_reaction_rule)\n# i = 1\n# break\n# if i == 1:\n# break\n \n \n \n\n\n\n#util = utilized_enzymes(x,y)\n#def utilized_enzymes(expression, flux):\n# out = set()\n# for g in expression.index:\n# e = R.genes[g]\n# reactions = map(lambda x: x.id, e.reactions)\n# if set(reactions) & set(flux.index):\n# out.add(e.id)\n# return out\n# \n#for c in gc.index:\n# x = enzymes[c].dropna()\n# expressed = set(x.index)\n# y = flux[c].dropna()\n# carry_flux = support_flux(y)\n# util = utilized_enzymes(x,y)\n# break\n# \n#fig = plt.figure()\n#ax = plt.axes()\n#venn3([all_enzymes, expressed, carry_flux], \n# ['enzymes\\n%s'%len(all_enzymes), 'expressed\\n%s'%len(expressed), 'support flux\\n%s'%len(util)], ax=ax)\n#plt.savefig('../res/enzymes_that_carry_flux.svg')\n\n\n\n'''\n\n \n\n#def perform_pFBA(model, cs, gr, ur):\n#\n# rxns = dict([(r.id, r) for r in model.reactions])\n# rxns['EX_glc_e'].lower_bound = 0 # uptake of carbon source reaction is initialized \n# try:\n# rxns['EX_' + cs + '_e'].lower_bound = -ur # redefine sole carbon source uptake reaction in mmol/gr/h\n# except:\n# print cs, ur\n# rxns['EX_glc_e'].lower_bound = -ur\n# 
rxns['Ec_biomass_iJO1366_core_53p95M'].upper_bound = gr \n# print \"solving pFBA\"\n# solution = optimize_minimal_flux(model, already_irreversible=True)\n# print cs, solution.f\n# flux_dist = pd.DataFrame(model.solution.x_dict.items()).set_index(0)\n# \n# return flux_dist \n#\n#fluxes = pd.DataFrame(index=R.rxns.keys(), columns=gc.index)\n#for c in gc.iterrows():\n# x = enzymes[c[0]].dropna()\n#\n# model = deepcopy(R.model)\n#\n# not_expressed = all_enzymes - set(x.index)\n# not_expressed = map(model.genes.get_by_id, not_expressed)\n## delete_model_genes(model, not_expressed)\n#\n# cs = c[1]['media_key']\n# gr = c[1]['growth rate [h-1]']\n# ur = c[1]['uptake rate [mmol gCDW-1 h-1]']\n# if np.isnan(ur):\n# ur = 18.5\n# try: \n# fluxes[c[0]] = perform_pFBA(model, cs, gr, ur)\n# except:\n# print cs\n# break\n\n\ndef bootstrap(x, w):\n x = x.dropna()\n w = w.dropna()\n ix = x.index & w.index\n x = x[ix].values\n w = w[ix].values\n Mw = np.zeros(1000)\n for i in xrange(1000):\n rand = np.random.choice(range(len(x)), len(x), replace=True)\n newx = x[rand]\n neww = w[rand]\n Mw[i] = sum(newx*neww)/sum(neww)\n return np.std(Mw)\n\n#plt.figure()\n#ax = plt.axes()\n#for c in gc.index:\n# cdf(enzymes[c], ax=ax)\n# print c, enzymes[c].median()\n#ax.set_xscale('log')\n\n\n\nunique = set([g for g in R.model.genes if len(g.reactions)==1])\n#index = set(proteins.index) & set(map(lambda x:x.id,enzymes))\nindex = set(proteins.index) & set(map(lambda x:x.id,unique))\nexpression = proteins.loc[index][gc.index]\nmg_gCDW = R._convert_mmol_gCDW_to_mg_gCDW(expression)\nmass = pd.DataFrame(index=efficiency.index, columns=gc.index)\n\nfor reac in mass.index:\n r = R.rxns[reac]\n genes = map(lambda x: x.id, r.genes)\n try:\n mass.loc[reac] = mg_gCDW.loc[genes].sum()\n except:\n continue\nmass.dropna(how='all', inplace=True)\n\nx = gc['growth rate [h-1]'][gc.index]\ny = np.zeros(len(gc))\nfor i,c in enumerate(gc.index):\n a = (mass[c]*efficiency[c]).dropna()\n y[i] = a.sum()/mass.loc[a.index][c].sum()\n#\nfig = plt.figure(figsize=(8,8))\nax = plt.axes()\n(intercept,slope), cov = curve_fit(lambda a,b,x: a*x+b, x, y)\nax.plot(x, slope*x+intercept, 'k:',zorder=0)\nfor j, i in enumerate(gc.index):\n c = gc.media_key[i]\n mode = gc['growth mode'][i]\n if mode == 'batch':\n ax.scatter(x[j],y[j],c='#ff4d4d',s=80,edgecolor='none',zorder=10)\n ax.errorbar(x[j],y[j],bootstrap(efficiency[i],mass[i]),c='r')\n ax.annotate(c,(x[j],y[j]+0.01),ha='center',va='baseline',size=15)\n else:\n ax.scatter(x[j],y[j], c='y',s=80,edgecolor='none', alpha=0.35)\nax.set_xlabel('growth rate [h$^{-1}$]', size=15)\nax.set_ylabel('effective capacity', size=15)\n[tick.label.set_fontsize(15) for tick in ax.xaxis.get_major_ticks()]\n[tick.label.set_fontsize(15) for tick in ax.yaxis.get_major_ticks()]\n\nplt.grid()\nax.set_xlim(np.floor(10*x.min())/10.,0.72)\nax.set_ylim(np.floor(10*x.min())/10.,0.72)\nplt.tight_layout()\nplt.savefig('../res/FIG1.png')\n\nfig = plt.figure()\nax = plt.axes()\ncm = plt.cm.get_cmap('Blues')\nfor i,c in enumerate(gc.index):\n a = R.kapp[c].dropna()\n# y = R.SA.loc[a.index][c]\n print a.median()\n cdf(a,color=cm(i/10.),ax=ax,lw=2.5)\nkmax = R.kmax['kmax per active site [s-1]'].dropna()\nkcat = R.kcat['kcat per active site [s-1]'].dropna()\ncdf(kmax,color='k',ax=ax,lw=2.5)\ncdf(kcat,color='y',ax=ax,lw=2.5)\n\nax.set_xscale('log')\nax.set_xlim(1e-2,1e3)\n\nfig = plt.figure()\nax = plt.axes()\nax.plot(x,mass.median(), 'ro')\nax.set_ylim(0,0.2)\n\n#z = {R.rxns[r].id:R.rxns[r].subsystem for r in WCE.index}\n#subsystems = 
defaultdict(list)\n#for key, value in sorted(z.iteritems()):\n# subsystems[value].append(key)\n#\n#colors = ColorMap(subsystems.keys())\n#for k,v in subsystems.iteritems():\n# array = matric.loc[v]\n# narray = array.div(array.mean(axis=1), axis=0)\n## narray.dropna(how='any', inplace=True)\n# g = gr[narray.columns]\n# print len(g), len(narray.columns)\n# \n# \n## print k, b\n# ax.plot(g, a*g+b, c=colors[k], marker='o', label=k)\n \n#\n#fig = plt.figure(figsize=(10,6))\n#ax = plt.axes(axisbg='0.95')\n#\n#plt.scatter(gr, matric.sum(), c='#4DB8FF', edgecolor='none', \n# s=50)\n## \n#labels = [gc['media'][c] for c in gr.index]\n#for i, txt in enumerate(labels):\n# ax.annotate(txt, (gr[i],matric.sum()[i]))\n##\n\n#plt.grid()\n\n#plt.savefig('../res/growth_rate_and_saturation.pdf')\n#x = a[-10:].index\n#colors = ColorMap(x)\n##plt.figure()\n##for r in x:\n## plt.plot(gr, CA.loc[r]/CA.loc[r][0], label=r, c = colors[r], marker='o')\n## plt.legend()\n#\n#plt.figure()\n#for r in x:\n# plt.plot(gr, E.loc[r], label=r, c = colors[r], marker='o')\n# plt.legend()\n# \n#plt.figure()\n#for r in x:\n# plt.plot(gr, V.loc[r], label=r, c = colors[r], marker='o')\n# plt.legend()\n# plt.figure()\n# plt.hist(a, 40)\n# print a['PSERT'] / gr[c]\n#\n\n#plt.tight_layout()\n#\n##fig = plt.figure(figsize=(6,6))\n##ax = plt.axes()\n##\n##for i, r in enumerate(efficiency.index):\n## x = weighted_concentration.loc[r].astype('float').dropna()\n## y = efficiency.loc[r].astype('float').dropna()\n##\n## z = gr[x.index]\n## z.sort\n## x = x[z.index] \n## y = y[z.index]\n## plt.scatter(x, y, c='b')\n## if i >= 0:\n## break\n###ax.set_xscale('log')\n##\n##\n### \n##fig = plt.figure(figsize=(6,6))\n##ax = plt.axes()\n##conditions = list(efficiency.columns)\n###conditions = [conditions[0:12]]# + [conditions[-1]]\n##cm = plt.cm.get_cmap('Greens')\n###cm = ['r']#, 'b']\n##for i, j in enumerate(conditions):\n## x = weighted_concentration[j].astype('float').dropna()\n## y = efficiency[j].astype('float').dropna()\n## \n## index = x.index & y.index\n## x = x.loc[index]\n## y = y.loc[index]\n## \n## plt.scatter(x, y, c=cm(1.0*i/len(conditions)), \n## edgecolor='none')\n##\n##[tick.label.set_fontsize(15) for tick in ax.xaxis.get_major_ticks()]\n##[tick.label.set_fontsize(15) for tick in ax.yaxis.get_major_ticks()]\n##ax.set_xlabel('log E [mg/gCDW]', size=15)\n##ax.set_ylabel('catalytic efficiency', size=15)\n##ax.set_xscale('log')\n##ax.set_xlim(1e-4,1e1)\n##ax.set_ylim(0,1.1)\n##\n##\n#\n##plt.scatter(gr, matric_theoretical, c='#8500AD', edgecolor='none', \n## s=50, label='relative to $k_{cat}$')\n##\n##a, p = curve_fit(lambda x, a: a*x, gr, matric_theoretical)\n##\n###gr = np.append(gr,1/a)\n###plt.plot(gr, a* gr)\n##\n###for i, c in enumerate(R.gc.index):\n### if R.gc['growth mode'][c] == 'batch':\n#### plt.scatter(gr[i], matric[c], c='k', edgecolor='none', \n#### s=50)\n### plt.scatter(gr[i], matric_theoretical[c], c='k', edgecolor='none', \n### s=50)\n##ax.set_xlim(0,1)\n##ax.set_ylim(0,0.4)\n###plt.legend(scatterpoints=1, loc=3, fontsize=15)#ax.plot([0,0.8],[0,0.8], '#993333')\n##plt.grid()\n##ax.set_xlabel('growth rate [h$^{-1}$]', size=15)\n##ax.set_ylabel('enzyome saturation', size=15)\n##\n##[tick.label.set_fontsize(15) for tick in ax.xaxis.get_major_ticks()]\n##[tick.label.set_fontsize(15) for tick in ax.yaxis.get_major_ticks()]\n##\n##plt.tight_layout()\n##\n##plt.savefig('../res/mass_efficiency.pdf')\n##\n##\n###fig = plt.figure(figsize=(6,6))\n###ax = plt.axes()\n###conditions = list(efficiency.columns)\n###conditions = 
[conditions[-10:-1]]# + [conditions[-1]]\n###cm = plt.cm.get_cmap('Greens')\n###cm = ['b']#, 'b']\n###for i, j in enumerate(conditions):\n### x = weighted_concentration[j].astype('float').dropna()\n### y = efficiency[j].astype('float').dropna()\n### \n### index = x.index & y.index\n### x = x.loc[index]\n### y = y.loc[index]\n### \n### plt.scatter(x, y, c=cm[i], \n### edgecolor='none')\n###\n###[tick.label.set_fontsize(15) for tick in ax.xaxis.get_major_ticks()]\n###[tick.label.set_fontsize(15) for tick in ax.yaxis.get_major_ticks()]\n###ax.set_xlabel('log E [mg/gCDW]', size=15)\n###ax.set_ylabel('catalytic efficiency', size=15)\n###ax.set_xscale('log')\n###ax.set_xlim(1e-4,1e1)\n###ax.set_ylim(0,1.1)\n'''","sub_path":"scripts/ribosomes.py","file_name":"ribosomes.py","file_ext":"py","file_size_in_byte":13149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"124079704","text":"import sys\n\nOUTPUT = sys.stdout\n\n\nclass html_meta(type):\n\n def __getattr__(cls, tag_name):\n\n def _init(self, **attrs):\n self._attrs = attrs\n\n def _print_opening_tag(self):\n print(f'<{tag_name}', end='', file=OUTPUT)\n for k, v in self._attrs.items():\n print(f' {k}=\"{v}\"', end='', file=OUTPUT)\n print('>', end='', file=OUTPUT)\n\n def _print_closing_tag(self):\n print(f'', end='', file=OUTPUT)\n\n def _enter(self):\n self._print_opening_tag()\n\n def _exit(self, exc_type, exc_val, exc_tb):\n self._print_closing_tag()\n\n def _call(self, text):\n self._print_opening_tag()\n print(text, end='', file=OUTPUT)\n self._print_closing_tag()\n\n tag_class = type(tag_name, (), {\n '__slots__': ('_attrs', ),\n '_print_opening_tag': _print_opening_tag,\n '_print_closing_tag': _print_closing_tag,\n '__init__': _init,\n '__enter__': _enter,\n '__exit__': _exit,\n '__call__': _call,\n })\n\n return tag_class\n\n\nclass html(metaclass=html_meta):\n pass\n\n\nif __name__ == '__main__':\n import unittest\n from io import StringIO\n\n class TestCase(unittest.TestCase):\n @classmethod\n def tearDownClass(cls):\n global OUTPUT\n OUTPUT = sys.stdout\n\n def setUp(self):\n global OUTPUT\n OUTPUT = StringIO()\n\n def tearDown(self):\n OUTPUT.close()\n\n def test_html_context_manager(self):\n assert isinstance(OUTPUT, StringIO) # for IDE method provision\n with html.u(style='color:red'):\n print('test', end='', file=OUTPUT)\n text = OUTPUT.getvalue()\n self.assertEqual(text, 'test')\n\n def test_html_call(self):\n assert isinstance(OUTPUT, StringIO) # for IDE method provision\n html.u(style='color:red')('test')\n text = OUTPUT.getvalue()\n self.assertEqual(text, 'test')\n\n suite = unittest.makeSuite(TestCase)\n runner = unittest.TextTestRunner()\n runner.run(suite)\n","sub_path":"util/html_tags_wrapper.py","file_name":"html_tags_wrapper.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"286242332","text":"from rest_framework import pagination, permissions\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom testproject.testapp import models, serializers\n\n\nclass ProjectViewSet(ModelViewSet):\n queryset = models.Project.objects.all()\n serializer_class = serializers.ProjectSerializer\n pagination_class = pagination.LimitOffsetPagination\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n\n @action(detail=True)\n def ping(self, request, pk):\n 
models.Project.objects.filter(pk=pk).update(name='ping')\n return Response(status=201)\n\n @action(detail=False)\n def first(self, request):\n project = models.Project.objects.first()\n serializer = self.get_serializer(instance=project)\n return Response(serializer.data)\n","sub_path":"testproject/testapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"571659655","text":"#!/usr/bin/env python\n \nimport os\nimport io\nimport sys\n\nstopword = [\":\",\",\",\"\",\" \",\"[\",\"]\",\"\\n\",\"{\",\"}\"]\n\nfilename = os.environ.get(\"mapreduce_map_input_file\")\nfor i in sys.stdin:\n if i.startswith(' \"data\"'):\n for j in i.split(\",\")[1:]:\n j=j.strip(''.join(stopword))\n if j not in stopword:\n print('{0:s}\\t{1:s},{2:d}'.format(j,filename,1))\n elif i.startswith(', [ '):\n for j in i.split(\",\"):\n j=j.strip(''.join(stopword))\n if j not in stopword:\n print('{0:s}\\t{1:s},{2:d}'.format(j,filename,1))\n","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"342143400","text":"import sys\r\n\r\nsys.stdin = open('file.in', 'r')\r\nsys.stdout = open('file.out', 'w')\r\n\r\ncard, door = map(int, input().split())\r\n\r\nsubjectnumber = 7\r\nloopsize = 0\r\npower = 1\r\nvalue = 1\r\n# Finding the card's loop size\r\nwhile value < card:\r\n value *= 7\r\n loopsize += 1\r\nwhile value != card:\r\n value *= 7\r\n value = value % 20201227\r\n loopsize += 1\r\nsubjectnumber = door\r\nvalue = 1\r\nloopsize = 8\r\nfor i in range(loopsize):\r\n value *= subjectnumber\r\n value = value % 20201227\r\nprint(value)\r\n","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"516280080","text":"# -*- coding: utf-8 -*-\n'''\nЗадание 9.1\nСоздать функцию, которая генерирует конфигурацию для access-портов.\nФункция ожидает такие аргументы:\n- словарь с соответствием интерфейс-VLAN такого вида:\n {'FastEthernet0/12':10,\n 'FastEthernet0/14':11,\n 'FastEthernet0/16':17}\n- шаблон конфигурации access-портов в виде списка команд (список access_mode_template)\nФункция должна возвращать список всех портов в режиме access\nс конфигурацией на основе шаблона access_mode_template.\nВ конце строк в списке не должно быть символа перевода строки.\nВ этом задании заготовка для функции уже сделана и надо только продолжить писать само тело функции.\nПример итогового списк�� (перевод строки после каждого элемента сделан для удобства чтения):\n[\n'interface FastEthernet0/12',\n'switchport mode access',\n'switchport access vlan 10',\n'switchport nonegotiate',\n'spanning-tree portfast',\n'spanning-tree bpduguard enable',\n'interface FastEthernet0/17',\n'switchport mode access',\n'switchport access vlan 150',\n'switchport nonegotiate',\n'spanning-tree portfast',\n'spanning-tree bpduguard enable',\n...]\nПроверить работу функции на примере словаря access_config.\nОграничение: Все задания надо выполнять используя только пройденные темы.\n'''\n\n\ndef generate_access_config(intf_vlan_mapping, access_template, psecurity = None):\n '''\n intf_vlan_mapping - словарь с соответствием интерфейс-VLAN такого вида:\n {'FastEthernet0/12':10,\n 'FastEthernet0/14':11,\n 'FastEthernet0/16':17}\n access_template - список команд для порта в режиме access\n 
Возвращает список всех портов в режиме access с конфигурацией на основе шаблона\n '''\n list1 = []\n for key, value in intf_vlan_mapping.items():\n list1.append(key)\n for command in access_template:\n if command.endswith(\"vlan\"):\n list1.append(command + ' ' + str(value))\n else:\n list1.append(command)\n if psecurity:\n for command in psecurity:\n list1.append(command)\n return list1\n\n\naccess_mode_template = [\n 'switchport mode access', 'switchport access vlan',\n 'switchport nonegotiate', 'spanning-tree portfast',\n 'spanning-tree bpduguard enable'\n]\n\nport_security_template = [\n 'switchport port-security maximum 2',\n 'switchport port-security violation restrict',\n 'switchport port-security'\n]\n\naccess_config = {\n 'FastEthernet0/12': 10,\n 'FastEthernet0/14': 11,\n 'FastEthernet0/16': 17\n}\n\nprint(generate_access_config(access_config, access_mode_template, port_security_template))\n","sub_path":"9_1.py","file_name":"9_1.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"231385301","text":"''' This script find the duplicate files using MD5, passing \n an argument which is the path of the directory.\n Condition: In-complete\n Author: Minh Bui\n'''\n\nimport os\nimport sys\n\ndef find_duplicates(path = os.getcwd()):\n ''' This function perform the calculating the md5 of files\n and group file with the same md5 value.\n '''\n cmd = 'cd ' + path\n wd = os.popen(cmd)\n if wd.read() != '':\n raise Exception('Directory does not exist.')\n else:\n cwd = os.getcwd()\n db = dict()\n listfiles = os.listdir(path)\n for item in listfiles:\n if path == os.getcwd():\n isFilePath = path + '/' + item\n else:\n isFilePath = cwd + '/' + path + '/' + item\n \n if os.path.isfile(isFilePath):\n cmd = 'md5sum ' + isFilePath\n fp = os.popen(cmd)\n message = fp.read()\n message = message.replace(item, ' ')\n md5 = message.strip()\n if md5 in db:\n db[md5].append(item)\n else:\n db[md5] = [item]\n wd.close()\n return db\n\ndef print_duplicates(db):\n ''' Print out the md5 key that has multiple files as values.\n '''\n for key in db:\n if len(db[key]) > 1:\n print('{0}'.format(db[key]))\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 0:\n db = find_duplicates()\n else:\n db = find_duplicates(sys.argv[1])\n print_duplicates(db)\n\n","sub_path":"scripts/find_duplicates.py","file_name":"find_duplicates.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"490892895","text":"class Property:\n \"\"\"\n Represents property\n \"\"\"\n\n def __init__(self, square_feet='', beds='',\n baths='', **kwargs):\n super().__init__(**kwargs)\n self.square_feet = square_feet\n self.num_bedrooms = beds\n self.num_baths = baths\n\n def display(self):\n \"\"\"\n (int) -> (str)\n Prints a table with property details\n \"\"\"\n print(\"PROPERTY DETAILS\")\n print(\"================\")\n print(\"square footage: {}\".format(self.square_feet))\n print(\"bedrooms: {}\".format(self.num_bedrooms))\n print(\"bathrooms: {}\".format(self.num_baths))\n print()\n\n def prompt_init():\n \"\"\"\n (int) -> (dict)\n Returns dictionary with the square feet, number of bedrooms and baths\n \"\"\"\n return dict(square_feet=input(\"Enter the square feet: \"),\n beds=input(\"Enter number of bedrooms: \"),\n baths=input(\"Enter number of baths: \"))\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass Apartment(Property):\n \"\"\"\n Represents 
Apartment and takes Property as superclass\n \"\"\"\n valid_laundries = (\"coin\", \"ensuite\", \"none\")\n valid_balconies = (\"yes\", \"no\", \"solarium\")\n\n def __init__(self, balcony='', laundry='', **kwargs):\n\n super().__init__(**kwargs)\n self.balcony = balcony\n self.laundry = laundry\n\n def display(self):\n \"\"\"\n (str) -> (str)\n Prints a table with apartment details\n \"\"\"\n super().display()\n print(\"APARTMENT DETAILS\")\n print(\"laundry: %s\" % self.laundry)\n print(\"has balcony: %s\" % self.balcony)\n parent_init = Property.prompt_init()\n laundry = ''\n while laundry.lower() not in \\\n Apartment.valid_laundries:\n laundry = input(\"What laundry facilities does \"\n \"the property have? ({})\".format(\n \", \".join(Apartment.valid_laundries)))\n balcony = ''\n while balcony.lower() not in \\\n Apartment.valid_balconies:\n balcony = input(\n \"Does the property have a balcony? \"\n \"({})\".format(\n \", \".join(Apartment.valid_balconies)))\n parent_init.update({\n \"laundry\": laundry,\n \"balcony\": balcony\n })\n return parent_init\n\n\ndef get_valid_input(input_string, valid_options):\n \"\"\"\n Validation function\n \"\"\"\n input_string += \" ({}) \".format(\", \".join(valid_options))\n response = input(input_string)\n while response.lower() not in valid_options:\n response = input(input_string)\n return response\n\n\ndef prompt_init():\n \"\"\"\n Return laundry facilities and presence of the balcony\n \"\"\"\n\n parent_init = Property.prompt_init()\n laundry = get_valid_input(\n \"What laundry facilities does \"\n \"the property have? \",\n Apartment.valid_laundries)\n balcony = get_valid_input(\n \"Does the property have a balcony? \",\n Apartment.valid_balconies)\n parent_init.update({\n \"laundry\": laundry,\n \"balcony\": balcony\n })\n return parent_init\n\n\nprompt_init = staticmethod(prompt_init)\n\n\nclass House(Property):\n \"\"\"\n Represents House and takes Property as a superclass\n \"\"\"\n valid_garage = (\"attached\", \"detached\", \"none\")\n valid_fenced = (\"yes\", \"no\")\n\n def __init__(self, num_stories='',\n garage='', fenced='', **kwargs):\n super().__init__(**kwargs)\n self.garage = garage\n self.fenced = fenced\n self.num_stories = num_stories\n\n def display(self):\n \"\"\"\n Displays details\n \"\"\"\n super().display()\n print(\"HOUSE DETAILS\")\n print(\"# of stories: {}\".format(self.num_stories))\n print(\"garage: {}\".format(self.garage))\n print(\"fenced yard: {}\".format(self.fenced))\n\n def prompt_init():\n \"\"\"\n Static method - yard, garage and number of stories\n Returns that information to the user\n \"\"\"\n parent_init = Property.prompt_init()\n fenced = get_valid_input(\"Is the yard fenced? \",\n House.valid_fenced)\n garage = get_valid_input(\"Is there a garage? \",\n House.valid_garage)\n num_stories = input(\"How many stories? 
\")\n parent_init.update({\n \"fenced\": fenced,\n \"garage\": garage,\n \"num_stories\": num_stories\n })\n return parent_init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass Purchase:\n \"\"\"\n Represents class Purchase\n \"\"\"\n\n def __init__(self, price='', taxes='', **kwargs):\n super().__init__(**kwargs)\n self.price = price\n self.taxes = taxes\n\n def display(self):\n \"\"\"\n Displays details\n \"\"\"\n super().display()\n print(\"PURCHASE DETAILS\")\n print(\"selling price: {}\".format(self.price))\n print(\"estimated taxes: {}\".format(self.taxes))\n\n def prompt_init():\n \"\"\"\n Static method - information about price and taxes\n Returns price and taxes\n \"\"\"\n return dict(\n price=input(\"What is the selling price? \"),\n taxes=input(\"What are the estimated taxes? \"))\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass Rental:\n \"\"\"\n Represents class Rental\n \"\"\"\n\n def __init__(self, furnished='', utilities='',\n rent='', **kwargs):\n super().__init__(**kwargs)\n self.furnished = furnished\n self.rent = rent\n self.utilities = utilities\n\n def display(self):\n \"\"\"\n Displays details\n \"\"\"\n super().display()\n print(\"RENTAL DETAILS\")\n print(\"rent: {}\".format(self.rent))\n print(\"estimated utilities: {}\".format(\n self.utilities))\n print(\"furnished: {}\".format(self.furnished))\n\n def prompt_init():\n \"\"\"\n Returns dictionary with: representing monthly rent,\n utilities and if is furnished\n \"\"\"\n return dict(\n rent=input(\"What is the monthly rent? \"),\n utilities=input(\n \"What are the estimated utilities? \"),\n furnished=get_valid_input(\n \"Is the property furnished? \",\n (\"yes\", \"no\")))\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass HouseRental(Rental, House):\n \"\"\"\n Represents House rental class and takes Rental, House as a superclass\n \"\"\"\n\n def prompt_init():\n \"\"\"\n Static method, which add to the existing dictionary\n \"\"\"\n init = House.prompt_init()\n init.update(Rental.prompt_init())\n return init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass ApartmentRental(Rental, Apartment):\n \"\"\"\n Represents Apartment rental class and takes Rental, Apartment as a superclass\n \"\"\"\n\n def prompt_init():\n \"\"\"\n Static method, which add to the existing dictionary\n \"\"\"\n init = Apartment.prompt_init()\n init.update(Rental.prompt_init())\n return init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass ApartmentPurchase(Purchase, Apartment):\n \"\"\"\n Represents Apartment rental class and takes Purchase, Apartment as a superclass\n \"\"\"\n\n def prompt_init():\n \"\"\"\n Static method, which add to the existing dictionary\n \"\"\"\n init = Apartment.prompt_init()\n init.update(Purchase.prompt_init())\n return init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass HousePurchase(Purchase, House):\n def prompt_init():\n \"\"\"\n Static method, which add to the existing dictionary\n \"\"\"\n init = House.prompt_init()\n init.update(Purchase.prompt_init())\n return init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass Agent:\n \"\"\"\n Represents class Agent, where you can do a payment job and choose\n type of property\n \"\"\"\n\n def __init__(self):\n self.property_list = []\n\n def display_properties(self):\n \"\"\"\n Dispalys details\n \"\"\"\n for property in self.property_list:\n property.display()\n\n type_map = {\n (\"house\", \"rental\"): HouseRental,\n (\"house\", \"purchase\"): HousePurchase,\n (\"apartment\", \"rental\"): ApartmentRental,\n 
(\"apartment\", \"purchase\"): ApartmentPurchase\n }\n\n def add_property(self):\n \"\"\"\n Information about a payment job and type of property\n \"\"\"\n property_type = get_valid_input(\n \"What type of property? \",\n (\"house\", \"apartment\")).lower()\n payment_type = get_valid_input(\n \"What payment type? \",\n (\"purchase\", \"rental\")).lower()\n PropertyClass = self.type_map[\n (property_type, payment_type)]\n init_args = PropertyClass.prompt_init()\n self.property_list.append(PropertyClass(**init_args))\n\n def add_field_court(self):\n \"\"\"\n Adding or not football field to the property\n \"\"\"\n quest1 = get_valid_input(\n \"Do you need a swimming pool? \",\n (\"yes\", \"no\"))\n\n quest2 = get_valid_input(\n \"Dou you need a gazebo? \",\n (\"yes\", \"no\"))\n\n\nagent = Agent()\nagent.add_property()\nagent.display_properties()\nagent.add_field_court()","sub_path":"property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":9392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"64429156","text":"# -*- coding:utf-8 -*-\n__author__ = ''\n__date__ = '2017/5/23 8:57'\nfrom django.shortcuts import render\nfrom django.views.generic.base import View\nfrom admin.utils.paginator import MyPaginator\n\nfrom admin.forms import UserForm\nfrom admin.models import User\nfrom admin.utils import method\n\n\nclass UserView(View):\n def get(self, request):\n users = User.objects.values('id', 'nick', 'role', 'status', 'add_time')\n paginator = MyPaginator(users, 10)\n page_num = request.GET.get('page', 1)\n try:\n users = paginator.page(page_num)\n except Exception as e:\n print(e)\n return render(request, 'sys/user.html', locals())\n\n\nclass UserEditView(View):\n def get(self, request, user_id):\n user_id = user_id\n if (user_id):\n user = User.objects.values('nick', 'name', 'pwd', 'role', 'status').filter(id=user_id).first()\n return render(request, 'sys/user_edit.html', locals())\n\n def post(self, request, user_id):\n msg = {}\n try:\n user = User.objects.get(pk=user_id)\n form = UserForm(request.POST,instance=user)\n if form.is_valid():\n try:\n form.save()\n msg['status'] = 0\n except:\n msg['status'] = 1\n else:\n msg['status'] = 1\n except:\n msg['status'] = 1\n return render(request, 'sys/user_edit.html', locals())\n\n\nclass UserAddView(View):\n def get(self, request):\n return render(request, 'sys/user_add.html')\n\n def post(self, request):\n form = UserForm(request.POST)\n msg = {}\n if form.is_valid():\n try:\n user = form.save()\n user.pwd = method.md5('ikg' + 'ikg123')\n user.save()\n msg['status'] = 0\n except:\n msg['status'] = 1\n else:\n msg['status'] = 1\n return render(request, 'sys/user_add.html', locals())\n\n\nclass UserInfoView(View):\n def get(self, request,user_id):\n user = User.objects.values('id', 'nick', 'role', 'status', 'add_time','name')\\\n .filter(id=user_id).first()\n return render(request, 'sys/user_info.html', locals())\n","sub_path":"apps/admin/views/sys/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"391510563","text":"import gym\n\nenvs = ['SeaquestNoFrameskip-v4',\n 'BreakoutNoFrameskip-v4',\n 'QberNoFrameskip-v4',\n 'HalfCheetah-v1',\n 'Hopper-v1',\n 'PongDeterministic-v4',\n ]\nenv = gym.make(envs[5])\ns_dim = env.observation_space.shape[0]\na_dim = env.action_space.n\nprint('s_dim: ', s_dim)\nprint('a_dim: ', a_dim)\n# env.reset()\n# 
env.render()\n#\n# env.monitor.start('/tmp/reacher-1', force=True)\nfor i_episode in range(101):\n observation = env.reset()\n for t in range(10000):\n env.render()\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('reward', reward)\n\n if done:\n print('Episode finished after {} timesteps'.format(t+1))\n break\n","sub_path":"abr/ppo-test/env_test.py","file_name":"env_test.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"371623805","text":"import math\n\n\n\nimport random\nfrom fractions import gcd\n\n_mrpt_num_trials = 5 # number of bases to test\n\n\ndef is_probable_prime(n):\n assert n >= 2\n # special case 2\n if n == 2:\n return True\n # ensure n is odd\n if n % 2 == 0:\n return False\n # write n-1 as 2**s * d\n # repeatedly try to divide n-1 by 2\n s = 0\n d = n-1\n while True:\n quotient, remainder = divmod(d, 2)\n if remainder == 1:\n break\n s += 1\n d = quotient\n assert(2**s * d == n-1)\n\n # test the base a to see whether it is a witness for the compositeness of n\n def try_composite(a):\n if pow(a, d, n) == 1:\n return False\n for i in range(s):\n if pow(a, 2**i * d, n) == n-1:\n return False\n return True # n is definitely composite\n\n for i in range(_mrpt_num_trials):\n a = random.randrange(2, n)\n if try_composite(a):\n return False\n\n return True # no base tested showed n as composite\n\n\ndef make_number(base, n):\n n_len = len(str(n)) -1\n number = 0\n for item in str(n):\n number += (int(item) * pow(base, n_len))\n n_len -= 1\n\n return number\n\n\ndef get_divisor(N):\n if N%2==0:\n return 2\n y,c,m = random.randint(1, N-1),random.randint(1, N-1),random.randint(1, N-1)\n g,r,q = 1,1,1\n while g==1:\n x = y\n for i in range(r):\n y = ((y*y)%N+c)%N\n k = 0\n while (k1:\n break\n\n return g\n\ndef main():\n f = open(\"C-small-attempt.in\")\n\n lines = f.readlines()\n case = lines[0].rstrip()\n print(\"Case #%s:\" % case)\n\n for index in range(1,int(case)+1):\n line = lines[index].rstrip()\n list_item = line.split()\n\n N = int(list_item[0])\n J = int(list_item[1])\n n1 = N - 2\n n2 = ''\n for i in range(0,int(n1)):\n n2 += '1'\n\n digit = make_number(2, n2)\n s = bin(digit)\n l = len(s)\n mid = s[2:l]\n\n result = []\n list_divisor = []\n found_count = 0\n target = make_number(2, mid)\n for i in range(0, target+1):\n if found_count == J:\n break\n\n result = []\n list_divisor = []\n s = bin(i)\n l = len(s)\n mid = s[2:l]\n\n dd = (\"%0\" + str(n1) + \"d\") % int(mid)\n\n is_break = False\n t = '1' + str(dd) + '1'\n #print(\"%s ==> t: %s\" % (i,t))\n for ii in range(2, 11):\n mn = make_number(ii, t)\n #print(\"mn : %s\" % mn)\n if is_probable_prime(mn):\n is_break = True\n break\n else:\n result.append(mn)\n\n if not is_break:\n found_count += 1\n #print(\"%s , %s ==> r: %s\" % (found_count, t,result))\n for r_item in result:\n list_divisor.append(str(get_divisor(r_item)))\n seq = \" \".join(list_divisor)\n\n print(\"%s %s\" % (t,seq))\n\n\n index += 1\n\n f.close()\n\n\nmain()\n\n","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_caslte_2016-c-large.py","file_name":"16_0_3_caslte_2016-c-large.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"298074287","text":"from django import forms\n\nfrom .models import Post\n\n\nclass PostCreateForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('title','content')\n widgets 
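# env_test.py above reads env.action_space.n unconditionally, but .n exists
# only for Discrete spaces; the Box spaces of HalfCheetah-v1 or Hopper-v1
# would raise AttributeError. A small sketch that branches on the space type
# (classic gym API; the exact env id is just an example):
import gym
from gym import spaces

env = gym.make('PongDeterministic-v4')
if isinstance(env.action_space, spaces.Discrete):
    a_dim = env.action_space.n           # number of discrete actions
elif isinstance(env.action_space, spaces.Box):
    a_dim = env.action_space.shape[0]    # dimensionality of continuous actions
else:
    raise TypeError('unsupported action space: %r' % env.action_space)
print('a_dim:', a_dim)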
= {\n 'content': forms.Textarea(\n attrs={'rows': 10, 'cols': 30, 'placeholder': 'ここに入力'}\n ),\n }\n","sub_path":"apps/tweet/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"74164995","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\n\n# In[2]:\n\n\nmessages = pd.read_csv('smsspamcollection/SMSSpamCollection', sep='\\t',\n names=[\"label\", \"message\"])\n\n\n# In[4]:\n\n\nmessages['label']\n\n\n# In[5]:\n\n\nmessages['message']\n\n\n# In[7]:\n\n\n#Data cleaning and preprocessing\nimport re\nimport nltk\nnltk.download('stopwords')\n\n\n# In[8]:\n\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n\n# In[9]:\n\n\nps = PorterStemmer()\ncorpus = []\n\n\n# In[10]:\n\n\nfor i in range(0, len(messages)):\n review = re.sub('[^a-zA-Z]', ' ', messages['message'][i])\n review = review.lower()\n review = review.split()\n \n review = [ps.stem(word) for word in review if not word in stopwords.words('english')]\n review = ' '.join(review)\n corpus.append(review)\n\n\n# In[14]:\n\n\nreview\n\n\n# In[15]:\n\n\ncorpus\n\n\n# In[16]:\n\n\n# Creating the Bag of Words model\nfrom sklearn.feature_extraction.text import CountVectorizer\ncv = CountVectorizer(max_features=2500)\nX = cv.fit_transform(corpus).toarray()\n\n\n# In[17]:\n\n\ny=pd.get_dummies(messages['label'])\ny=y.iloc[:,1].values\n\n\n# In[18]:\n\n\n# Train Test Split\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n\n\n# In[19]:\n\n\n# Training model using Naive bayes classifier\n\nfrom sklearn.naive_bayes import MultinomialNB\nspam_detect_model = MultinomialNB().fit(X_train, y_train)\n\ny_pred=spam_detect_model.predict(X_test)\n\n\n# In[20]:\n\n\nfrom sklearn.metrics import confusion_matrix\nconfun_m=confusion_matrix(y_test,y_pred)\n\n\n# In[21]:\n\n\nconfun_m\n\n\n# In[22]:\n\n\nfrom sklearn.metrics import accuracy_score\naccu_s=accuracy_score(y_test,y_pred)\n\n\n# In[23]:\n\n\naccu_s\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"spam or not.py","file_name":"spam or not.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"210614852","text":"\"\"\"watches URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
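# The spam script above fits CountVectorizer on the full corpus before the
# train/test split, so the vocabulary leaks test data; scikit-learn's Pipeline
# bundles the vectoriser and the classifier and fits both on training text
# only. A minimal sketch, assuming the same `corpus` list and label vector `y`
# built above:
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

pipeline = Pipeline([
    ('bow', CountVectorizer(max_features=2500)),
    ('nb', MultinomialNB()),
])
X_train, X_test, y_train, y_test = train_test_split(
    corpus, y, test_size=0.20, random_state=0)
pipeline.fit(X_train, y_train)          # vocabulary built from training text only
print(pipeline.score(X_test, y_test))   # accuracy on held-out messages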
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom shop.views import home, products, productDetail, compare\nfrom cart.views import add, remove, change_quantity, cart, cart_clear\nfrom orders.views import order\nfrom users.views import register, profile, login_view, logout_view\nfrom filters.views import filters_view\n\nurlpatterns = [\n #ADMIN URL\n\tpath('admin/', admin.site.urls),\n\n #SHOP URLS\n path('', home, name='home'),\n path('products//', products, name='products'),\n path('products////', productDetail, name='productDetail'),\n path('products//compare/', compare, name='compare'),\n\n #CART URLS\n path('cart/add/', add, name='add'),\n path('cart/remove/', remove, name='remove'),\n path('cart/change_quantity/', change_quantity, name='change_quantity'),\n path('cart/', cart, name='cart'),\n path('cart_clear/', cart_clear, name='cart_clear'),\n\n #ORDERS URLS\n path('order/', order, name='order'),\n\n #USERS URLS\n path('register/', register, name='register'),\n path('profile/', profile, name='profile'),\n path('login/', login_view, name='login'),\n path('logout/', logout_view, name='logout'),\n\n #FILTERS URLS\n path('products//filters/', filters_view, name='filters'),\n] \n\nif settings.DEBUG:\n\turlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\turlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)","sub_path":"watches/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"251775891","text":"# -*- coding: utf-8 -*-\n\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime,timedelta\nfrom odoo import api, fields, models, _\n\n\nclass HrEmployee(models.Model):\n _inherit = 'hr.employee'\n\n certificates = fields.Boolean(default=True, string=\"Certificates\")\n\n\nclass EmployeeTraining(models.Model):\n _name = 'employee.training'\n _rec_name = 'program_name'\n _description = \"Employee Training\"\n _inherit = 'mail.thread'\n\n program_name = fields.Char(string='Training Program', required=True)\n program_department = fields.Many2one('hr.department', string='Department', required=True)\n program_convener = fields.Many2one('res.users', string='Responsible User', size=32, required=True)\n training_id = fields.One2many('hr.employee', string='Employee Details', compute=\"employee_details\")\n note_id = fields.Text('Description')\n date_from = fields.Datetime(string=\"Date From\")\n date_to = fields.Datetime(string=\"Date To\")\n user_id = fields.Many2one('res.users', string='users', default=lambda self: self.env.user)\n company_id = fields.Many2one('res.company', string='Company', required=True,\n default=lambda self: self.env.user.company_id)\n # product_updatable = fields.Boolean(compute='_compute_product_updatable', string='Can Edit Product', readonly=True, default=True)\n # @api.depends('training_id')\n # def _compute_product_updatable(self):\n # for line in self:\n # if line.state in ['done', 'cancel'] or (line.state == 'sale' and (line.qty_invoiced > 0 or line.qty_delivered > 0)):\n # line.product_updatable = False\n # else:\n # line.product_updatable = True\n\n state = fields.Selection([\n ('new', 'New'),\n ('confirm', 'Confirmed'),\n ('cancel', 'Canceled'),\n ('complete', 'Completed'),\n ('print', 'Print'),\n ], 
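# watches/urls.py above imports every app's views into the project URLConf;
# the include() pattern from its own docstring keeps each app's routes in the
# app instead. A sketch of the cart routes split out (the cart/urls.py module
# path is an assumption, the view names are from the project):
# --- cart/urls.py ---
from django.urls import path
from cart.views import add, remove, change_quantity, cart, cart_clear

urlpatterns = [
    path('add/', add, name='add'),
    path('remove/', remove, name='remove'),
    path('change_quantity/', change_quantity, name='change_quantity'),
    path('', cart, name='cart'),
    path('clear/', cart_clear, name='cart_clear'),
]
# --- watches/urls.py then needs a single line per app ---
# path('cart/', include('cart.urls')),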
string='Status', readonly=True, copy=False, index=True, track_visibility='onchange', default='new')\n\n @api.onchange('program_department')\n def employee_details(self):\n datas = self.env['hr.employee'].search([('department_id', '=', self.program_department.id)])\n self.training_id = datas\n\n @api.multi\n def print_event(self):\n self.ensure_one()\n started_date = datetime.strftime(self.create_date, \"%Y-%m-%d \")\n duration = (self.write_date - self.create_date).days\n pause = relativedelta(hours=0)\n difference = relativedelta(self.write_date, self.create_date) - pause\n hours = difference.hours\n minutes = difference.minutes\n data = {\n 'dept_id': self.program_department.id,\n 'program_name': self.program_name,\n 'company_name': self.company_id.name,\n 'date_to': started_date,\n 'duration': duration,\n 'hours': hours,\n 'minutes': minutes,\n 'program_convener': self.program_convener.name,\n\n }\n return self.env.ref('employee_orientation.print_pack_certificates').report_action(self, data=data)\n\n @api.multi\n def complete_event(self):\n self.write({'state': 'complete'})\n\n @api.multi\n def confirm_event(self):\n self.write({'state': 'confirm'})\n\n @api.multi\n def cancel_event(self):\n self.write({'state': 'cancel'})\n\n @api.multi\n def confirm_send_mail(self):\n self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n try:\n template_id = ir_model_data.get_object_reference('employee_orientation', 'orientation_training_mailer')[1]\n except ValueError:\n template_id = False\n try:\n compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n ctx = dict(self.env.context or {})\n ctx.update({\n 'default_model': 'employee.training',\n 'default_res_id': self.ids[0],\n 'default_use_template': bool(template_id),\n 'default_template_id': template_id,\n 'default_composition_mode': 'comment',\n })\n\n return {\n 'name': _('Compose Email'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }\n","sub_path":"employee_orientation/models/employee_training.py","file_name":"employee_training.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"319678651","text":"from pymongo import MongoClient\nclient = MongoClient()\ndb = client.whosampled\n\nimport numpy as np\nfrom time import sleep\nfrom src.scrape_data_clean.whosampled_scrape import Scraper\n\nif __name__ == \"__main__\":\n scraper = Scraper()\n failed_links = []\n\n song_sampled_pages_to_do = db.song_sampled_pages_to_do.distinct('link')\n\n for song_sample_page in song_sampled_pages_to_do[-10000:]: \n try:\n scraper.insert_song_sample_info_into_db_main(song_sample_page)\n print('Done with {}'.format(song_sample_page))\n except:\n print(\"Insertion into Mongo failed for {}\".format(song_sample_page))\n failed_links.append(song_sample_page)\n sleep(1.1)\n print(failed_links)\n scraper.driver.quit()","sub_path":"src/scrape_data_clean/put_info_from_links_in_mongo_db/insert_song_sample_info_into_main_2.py","file_name":"insert_song_sample_info_into_main_2.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"332094667","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*- \n\nimport 
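# print_event above mixes timedelta arithmetic with a relativedelta "pause" of
# zero hours, which contributes nothing. The whole duration can come from one
# subtraction plus divmod. A standalone sketch (the datetimes are sample
# values; in the model they would be self.create_date/self.write_date):
from datetime import datetime

start = datetime(2017, 3, 1, 9, 0)
end = datetime(2017, 3, 3, 11, 45)
delta = end - start
hours, remainder = divmod(delta.seconds, 3600)  # seconds within the last day
minutes = remainder // 60
print(delta.days, 'days,', hours, 'hours,', minutes, 'minutes')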
os\nimport sys\n\nextractors = (\"SURF\", \"SIFT\")\ndetectors = (\"SURF\", \"FAST\", \"STAR\", \"SIFT\")\nclassifiers = (\"NormalBayesClassifier\", \"KNearest\")\n\nfor classifier in classifiers:\n\tfor detector in detectors:\n\t\tfor extractor in extractors:\n\t\t\tos.system(\"./DetectorDeMonumentos %s %s %s >> avaliacao-final1\" % (detector, extractor, classifier))\n","sub_path":"evaluate3.py","file_name":"evaluate3.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"414739145","text":"from flask import Flask,render_template,request\n\nimport re \nimport tweepy \nfrom tweepy import OAuthHandler \nfrom textblob import TextBlob \napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method==\"POST\":\n class TwitterClient(object): \n def __init__(self): \n '''Class constructor or initialization method.'''\n # keys and tokens from the Twitter Dev Console \n consumer_key = 'XXXXXXXXXXXXXXXX'\n consumer_secret = 'XXXXXXXXXXXXXXXXXXX'\n access_token = 'XXXXXXXXXXXXXXXXXXXXXXXXXX'\n access_token_secret = 'XXXXXXXXXXXXXXXXXXXXX'\n # attempt authentication \n try: \n # create OAuthHandler object \n self.auth = OAuthHandler(consumer_key, consumer_secret) \n # set access token and secret \n self.auth.set_access_token(access_token, access_token_secret) \n # create tweepy API object to fetch tweets \n self.api = tweepy.API(self.auth) \n except: \n print(\"Error: Authentication Failed\") \n\n def clean_tweet(self, tweet): \n ''' \n Utility function to clean tweet text by removing links, special characters \n using simple regex statements. \n '''\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split()) \n def get_tweet_sentiment(self, tweet): \n ''' \n Utility function to classify sentiment of passed tweet \n using textblob's sentiment method \n '''\n # create TextBlob object of passed tweet text \n analysis = TextBlob(self.clean_tweet(tweet)) \n # set sentiment \n if analysis.sentiment.polarity > 0: \n return 'positive'\n elif analysis.sentiment.polarity == 0: \n return 'neutral'\n else: \n return 'negative'\n\n def get_tweets(self, query, count = 10): \n ''' \n Main function to fetch tweets and parse them. 
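# evaluate3.py above nests three loops around os.system with string
# interpolation; itertools.product plus subprocess.run passes the arguments as
# a list, so no shell quoting is needed and the loop flattens. A sketch of the
# same sweep (binary and log file names are taken from the script; redirecting
# stdout through a file handle is an assumption about the intent of `>>`):
import itertools
import subprocess

extractors = ("SURF", "SIFT")
detectors = ("SURF", "FAST", "STAR", "SIFT")
classifiers = ("NormalBayesClassifier", "KNearest")

with open("avaliacao-final1", "ab") as log:
    for classifier, detector, extractor in itertools.product(
            classifiers, detectors, extractors):
        # append each run's output, like the shell's >> redirection
        subprocess.run(["./DetectorDeMonumentos", detector, extractor,
                        classifier], stdout=log)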
\n '''\n # empty list to store parsed tweets \n tweets = [] \n\n try: \n # call twitter api to fetch tweets \n fetched_tweets = self.api.search(q = query, count = count) \n\n # parsing tweets one by one \n for tweet in fetched_tweets: \n # empty dictionary to store required params of a tweet \n parsed_tweet = {} \n\n # saving text of tweet \n parsed_tweet['text'] = tweet.text \n # saving sentiment of tweet \n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text) \n\n # appending parsed tweet to tweets list \n if tweet.retweet_count > 0: \n # if tweet has retweets, ensure that it is appended only once \n if parsed_tweet not in tweets: \n tweets.append(parsed_tweet) \n else: \n tweets.append(parsed_tweet) \n\n # return parsed tweets \n return tweets \n\n except tweepy.TweepError as e: \n # print error (if any) \n print(\"Error : \" + str(e)) \n try:\n def main(): \n # creating object of TwitterClient Class \n accountname=request.form.get('accountname', False)\n api = TwitterClient() \n \n tweets = api.get_tweets(query = accountname, count = 500) \n global positivation\n global negation\n global neutraly\n \n ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive'] \n\n positivation=(100*len(ptweets)/len(tweets))*2\n \n ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative'] \n \n negation=(100*len(ntweets)/len(tweets))*2\n \n \n neutraly=100-(positivation+negation)\n global negarray\n global posarray\n negarray=[]\n posarray=[]\n \n for tweet in ptweets:\n negarray.append(tweet['text'])\n \n \n for tweet in ntweets: \n posarray.append(tweet['text']) \n print(posarray)\n print(negarray)\n\n main()\n return render_template('output.html',positivation=positivation,negation=negation,neutraly=neutraly,negarray=negarray,posarray=posarray)\n except:\n return '
Pappu not here
' \n return render_template('index.html')\n\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"twitterapp.py","file_name":"twitterapp.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"289918670","text":"import string\ndef isWordGuessed(secretWord, lettersGuessed):\n secretWord = list(secretWord)\n items = set(lettersGuessed)\n count = [i for i in secretWord if i in items]\n if len(secretWord) is len(count):\n return True\n else:\n return False\n\ndef underScore(secretWord, lettersGuessed):\n resultList = []\n secretWordList = list(secretWord)\n for x in range(len(secretWord)):\n resultList.append('_ ')\n for i in lettersGuessed:\n for ind in range(len(secretWordList)):\n if i is secretWordList[ind]:\n resultList[ind] = i\n return ''.join(resultList)\n\ndef getAvailableLetters(lettersGuessed):\n availableLettersList = list(string.ascii_lowercase)\n for char in lettersGuessed:\n for charAlpha in availableLettersList:\n if char is charAlpha:\n availableLettersList.remove(charAlpha)\n return ''.join(availableLettersList)\n\ndef hangman(secretWord):\n lettersGuessed = []\n rightWord = 0\n numOfGuesses = 8\n print(\"Welcome to the game, Hangman!\")\n print(\"I am thinking of a word that is {} letters long.\".format(len(secretWord)))\n print(\"-----------\")\n while numOfGuesses > 0:\n print(\"You have {} guesses left.\".format(numOfGuesses))\n print(\"Available letters: \" + getAvailableLetters(lettersGuessed))\n guess = input(\"Please guess a letter: \")\n if guess in list(secretWord) and guess not in lettersGuessed:\n lettersGuessed.append(guess)\n print(\"Good guess: \" + underScore(secretWord,lettersGuessed) + \"\\n\")\n rightWord = len(underScore(secretWord,lettersGuessed))\n elif guess in lettersGuessed:\n print(\"Oops! You've already guessed that letter:\" + underScore(secretWord,lettersGuessed) + \"\\n\")\n else:\n lettersGuessed.append(guess)\n print(\"Oops! That letter is not in my word:\" + underScore(secretWord,lettersGuessed) + \"\\n\")\n numOfGuesses -= 1\n print(\"-----------\")\n if numOfGuesses == 0:\n print(\"Sorry, you ran out of guesses. 
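# The percentage maths above multiplies the positive and negative shares by 2
# and derives the neutral share by subtraction, which can push values past 100
# or below 0. A direct sketch of the intended shares (the tweet dictionaries
# follow the shape produced by get_tweets above):
tweets = [{'sentiment': 'positive'}, {'sentiment': 'negative'},
          {'sentiment': 'neutral'}, {'sentiment': 'positive'}]

positive = 100.0 * sum(t['sentiment'] == 'positive' for t in tweets) / len(tweets)
negative = 100.0 * sum(t['sentiment'] == 'negative' for t in tweets) / len(tweets)
neutral = 100.0 - positive - negative
print(positive, negative, neutral)   # 50.0 25.0 25.0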
The word was {}.\".format(secretWord))\n if rightWord == len(secretWord):\n numOfGuesses = 0\n print(\"Congratulations, you won!\")\n \n \n\nsecretWord = 'sea'\n# lettersGuessed = ['e', 'd', 'i', 'l', 'h', 'k', 's', 'p', 'a', 'd']\n\n# print(underScore(secretWord, lettersGuessed))\n# print(getAvailableLetters(lettersGuessed))\n\nhangman(secretWord)\n","sub_path":"Python/Eksempler/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"492789293","text":"# -*- encoding: utf-8 -*-\n# © 2017 Mackilem Van der Laan, Trustcode\n# © 2017 Danimar Ribeiro, Trustcode\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom odoo import api, fields, models, _\n\n\nclass admission_statistical(models.TransientModel):\n _name = 'admission.statistical.wizard'\n\n year_id = fields.Many2one(\n string=\"Academic Year\",\n comodel_name=\"uni.year\",\n required=True,\n )\n\n admission_category_id = fields.Many2one(\n string=\"Admission Category\",\n comodel_name=\"uni.student_category\",\n )\n\n @api.multi\n def check_report(self, data):\n self.ensure_one()\n data = {}\n data['ids'] = self.env.context.get('active_ids', [])\n data['model'] = self.env.context.get('active_model', 'ir.ui.menu')\n data['form'] = self.read(\n ['year_id', 'admission_category_id'])[0]\n return self.env['report'].get_action(self, 'uni_admission.admission_statistical', data=data)\n","sub_path":"uni_admission/wizard/admission_statistical.py","file_name":"admission_statistical.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"646818191","text":"#!/usr/bin/env python3\n\nimport urllib.request, urllib.error, urllib.parse\nimport json\nimport base64\nimport sys, os\nimport datetime\nimport argparse, configparser\nimport query\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\ndefault_config_file = os.path.join(__location__, 'config.ini')\nconfig = configparser.ConfigParser()\n\nsource_url = None\ntarget_url = None\n\nhttp_error_messages = {}\nhttp_error_messages[401] = \"ERROR: There was a problem during authentication.\\nDouble check that your username and password are correct, and that you have permission to read from or write to the specified repositories.\"\nhttp_error_messages[403] = http_error_messages[401]; # Basically the same problem. GitHub returns 403 instead to prevent abuse.\nhttp_error_messages[404] = \"ERROR: Unable to find the specified repository.\\nDouble check the spelling for the source and target repositories. If either repository is private, make sure the specified user is allowed access to it.\"\n\n\ndef init_config():\n \n config.add_section('source')\n config.add_section('target')\n config.add_section('format')\n config.add_section('settings')\n \n arg_parser = argparse.ArgumentParser(description=\"Import issues from one GitHub repository into another.\")\n \n arg_parser.add_argument('--config', help=\"The location of the config file (either absolute, or relative to the current working directory). Defaults to `config.ini` found in the same folder as this script.\")\n arg_parser.add_argument('-source_u', '--source_username', help=\"The SOURCE username of the account on the SOURCE server the issues are to be copied from. 
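# hangman() above tracks the win through len(underScore(...)), but that string
# mixes two-character '_ ' markers with single letters, so its length is not
# comparable with len(secretWord). A set-based check is a simpler sketch of
# the intent (snake_case name chosen to avoid shadowing isWordGuessed above):
def is_word_guessed(secret_word, letters_guessed):
    # won once every distinct letter of the word has been guessed
    return set(secret_word) <= set(letters_guessed)

assert is_word_guessed('sea', ['s', 'e', 'a', 'x'])
assert not is_word_guessed('sea', ['s', 'e'])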
The username will not be stored anywhere if passed in as an argument.\")\n arg_parser.add_argument('-source_p', '--source_password', help=\"The SOURCE password of the account on the SOURCE server the issues are to be copied from. The username will not be stored anywhere if passed in as an argument.\")\n arg_parser.add_argument('-target_u', '--target_username', help=\"The TARGET username of the account on the TARGET server the issues are to be copied from. The username will not be stored anywhere if passed in as an argument.\")\n arg_parser.add_argument('-target_p', '--target_password', help=\"The TARGET password of the account on the TARGET server the issues are to be copied from. The username will not be stored anywhere if passed in as an argument.\")\n arg_parser.add_argument('-source_s', '--source_server', help=\"The SOURCE server which the issues should be copied from. e.g. `github.com` or `github.mycompany.com` (for enterprise).\")\n arg_parser.add_argument('-target_s', '--target_server', help=\"The TARGET server which the issues should be copied to. e.g. `github.com` or `github.mycompany.com` (for enterprise).\")\n arg_parser.add_argument('-source_r', '--source_repo', help=\"The source repository which the issues should be copied from. Should be in the format `user/repository`.\")\n arg_parser.add_argument('-target_r', '--target_repo', help=\"The destination repository which the issues should be copied to. Should be in the format `user/repository`.\")\n \n arg_parser.add_argument('--ignore-comments', dest='ignore_comments', action='store_true', help=\"Do not import comments in the issue.\") \n arg_parser.add_argument('--ignore-milestone', dest='ignore_milestone', action='store_true', help=\"Do not import the milestone attached to the issue.\")\n arg_parser.add_argument('--ignore-labels', dest='ignore_labels', action='store_true', help=\"Do not import labels attached to the issue.\")\n \n arg_parser.add_argument(\"issues\", type=int, nargs='*', help=\"The list of issues to import. 
If no issue ID is provided, all open issues will be imported.\");\n \n args = arg_parser.parse_args()\n \n config_file_name = default_config_file\n if (args.config): config_file_name = args.config\n \n try:\n config_file = open(config_file_name)\n config.read_file(config_file)\n except FileNotFoundError:\n sys.exit(\"ERROR: Unable to find or open config file '%s'\" % config_file_name);\n \n if (args.source_username): config.set('source', 'username', args.source_username)\n if (args.source_password): config.set('source', 'password', args.source_password)\n if (args.target_username): config.set('target', 'username', args.target_username)\n if (args.target_password): config.set('target', 'password', args.target_password)\n if (args.source_server): config.set('source', 'server', args.source_server)\n if (args.target_server): config.set('target', 'server', args.target_server)\n if (args.source_repo): config.set('source', 'repository', args.source_repo)\n if (args.target_repo): config.set('target', 'repository', args.target_repo)\n \n config.set('settings', 'import-comments', str(not args.ignore_comments))\n config.set('settings', 'import-milestone', str(not args.ignore_milestone))\n config.set('settings', 'import-labels', str(not args.ignore_labels))\n \n \n # Make sure no required config values are missing\n if not config.has_option('source','repository') :\n sys.exit(\"ERROR: There is no source repository specified either in the config file, or as an argument.\")\n if not config.has_option('target','repository') :\n sys.exit(\"ERROR: There is no target repository specified either in the config file, or as an argument.\")\n \n # Prompt for SOURCE username/password if none is provided in either the config or an argument\n if not config.has_option('source', 'username') :\n config.set('source', 'username', query.username(\"Enter your username for GitHub.com: \"))\n if not config.has_option('source', 'password') :\n config.set('source', 'password', query.password(\"Enter your password for GitHub.com: \"))\n \n # Prompt for TARGET username/password if none is provided in either the config or an argument\n if not config.has_option('target', 'username') :\n config.set('target', 'username', query.username(\"Enter your TARGET username for GitHub.com: \"))\n if not config.has_option('target', 'password') :\n config.set('target', 'password', query.password(\"Enter your TARGET password for GitHub.com: \"))\n \n \n # Everything is here! 
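# The source/target URL blocks just below repeat the same github.com versus
# enterprise branch twice; one helper covers both sides. A sketch (the URL
# shapes are copied from the code, the helper name is an invention):
def api_base(server, repository):
    if server == "github.com":
        return "https://api.github.com/repos/%s" % repository
    return "https://%s/api/v3/repos/%s" % (server, repository)

# source_url = api_base(config.get('source', 'server'),
#                       config.get('source', 'repository'))
# target_url = api_base(config.get('target', 'server'),
#                       config.get('target', 'repository'))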
Continue on our merry way...\n global source_url, target_url\n\n # if SOURCE server is not github.com, then assume ENTERPRISE github (yourdomain.com/api/v3...)\n if (config.get('source','server') != \"github.com\") :\n source_api_server = config.get('source','server')\n source_url = \"https://%s/api/v3/repos/%s\" % (source_api_server, config.get('source','repository'))\n else :\n source_api_server = \"api.github.com\"\n source_url = \"https://%s/repos/%s\" % (source_api_server, config.get('source','repository'))\n\n # if TARGET server is not github.com, then assume ENTERPRISE github (yourdomain.com/api/v3...)\n if (config.get('target','server') != \"github.com\") :\n target_api_server = config.get('target','server')\n target_url = \"https://%s/api/v3/repos/%s\" % (target_api_server, config.get('target','repository'))\n else :\n target_api_server = \"api.github.com\"\n target_url = \"https://%s/repos/%s\" % (target_api_server, config.get('target','repository'))\n \n \n return args.issues\n\ndef format_date(datestring):\n # The date comes from the API in ISO-8601 format\n date = datetime.datetime.strptime(datestring, \"%Y-%m-%dT%H:%M:%SZ\")\n date_format = config.get('format', 'date', fallback='%A %b %d, %Y at %H:%M GMT', raw=True);\n return date.strftime(date_format)\n \ndef format_from_template(template_filename, template_data):\n from string import Template\n template_file = open(template_filename, 'r')\n template = Template(template_file.read())\n return template.substitute(template_data)\n\ndef format_issue(template_data):\n default_template = os.path.join(__location__, 'templates', 'issue.md')\n template = config.get('format', 'issue_template', fallback=default_template)\n return format_from_template(template, template_data)\n\ndef format_pull_request(template_data):\n default_template = os.path.join(__location__, 'templates', 'pull_request.md')\n template = config.get('format', 'pull_request_template', fallback=default_template)\n return format_from_template(template, template_data)\n\ndef format_comment(template_data):\n default_template = os.path.join(__location__, 'templates', 'comment.md')\n template = config.get('format', 'comment_template', fallback=default_template)\n return format_from_template(template, template_data)\n\ndef send_request(which, url, post_data=None, req_method=None):\n if (post_data != None):\n post_data = json.dumps(post_data).encode(\"utf-8\")\n \n\n req = urllib.request.Request(url,post_data)\n \n username = config.get(which,'username')\n password = config.get(which,'password')\n \n if (req_method != None):\n req.method = req_method\n \n req.add_header(\"Authorization\", b\"Basic \" + base64.urlsafe_b64encode(username.encode(\"utf-8\") + b\":\" + password.encode(\"utf-8\")))\n req.add_header(\"Content-Type\", \"application/json\")\n req.add_header(\"Accept\", \"application/json\")\n req.add_header(\"User-Agent\", \"IQAndreas/github-issues-import\")\n \n try:\n response = urllib.request.urlopen(req)\n json_data = response.read()\n except urllib.error.HTTPError as error:\n \n error_details = error.read();\n error_details = json.loads(error_details.decode(\"utf-8\"))\n print(error_details)\n if (error.code in http_error_messages):\n sys.exit(http_error_messages[error.code])\n else:\n error_message = \"ERROR: There was a problem importing the issues.\\n%s %s\" % (error.code, error.reason)\n if ('message' in error_details):\n error_message += \"\\nDETAILS: \" + error_details['message']\n sys.exit(error_message)\n \n return 
json.loads(json_data.decode(\"utf-8\"))\n\ndef get_milestones(which, state, url):\n if (state == \"all\") :\n return send_request(which,\"%s/milestones\" % (url))\n else :\n return send_request(which,\"%s/milestones?state=%s\" % (url, state))\n \ndef get_labels(which, url):\n return send_request(which,\"%s/labels\" % url)\n \ndef get_issue_by_id(which, url, issue_id):\n return send_request(which,\"%s/issues/%d\" % (url, issue_id))\n\ndef get_issues(which, state, url):\n issues = []\n page = 1\n while True:\n if (state == \"all\") :\n open_issues = send_request(which,\"%s/issues?state=open&direction=asc&page=%d\" % (url, page))\n closed_issues = send_request(which,\"%s/issues?state=closed&direction=asc&page=%d\" % (url, page))\n if (not open_issues and not closed_issues):\n break\n issues.extend(open_issues)\n issues.extend(closed_issues)\n else :\n new_issues = send_request(which,\"%s/issues?state=%s&direction=asc&page=%d\" % (url, state, page))\n if not new_issues:\n break\n issues.extend(new_issues)\n \n page += 1\n return issues\n\ndef get_comments_on_issue(which,issue):\n if issue['comments'] != 0:\n return send_request(which,\"%s/comments\" % issue['url'])\n else :\n return []\n\ndef import_milestone(source):\n data = {\n \"title\": source['title'],\n \"state\": source['state'],\n \"description\": source['description'],\n \"due_on\": source['due_on']\n }\n \n result_milestone = send_request(\"target\",\"%s/milestones\" % target_url, source)\n print(\"Successfully created milestone '%s'\" % result_milestone['title'])\n return result_milestone\n\ndef import_label(source):\n data = {\n \"name\": source['name'],\n \"color\": source['color']\n }\n \n result_label = send_request(\"target\",\"%s/labels\" % target_url, source)\n print(\"Successfully created label '%s'\" % result_label['name'])\n return result_label\n\ndef import_comments(comments, issue_number):\n result_comments = []\n for comment in comments:\n \n template_data = {}\n template_data['comment_creator_username'] = comment['user']['login']\n template_data['comment_creator_url'] = comment['user']['html_url']\n template_data['comment_date'] = format_date(comment['created_at'])\n template_data['comment_url'] = comment['html_url']\n template_data['comment_body'] = comment['body']\n \n comment['body'] = format_comment(template_data)\n\n result_comment = send_request(\"target\",\"%s/issues/%s/comments\" % (target_url, issue_number), comment)\n result_comments.append(result_comment)\n \n return result_comments\n\n# Will only import milestones and issues that are in use by the imported issues, and do not exist in the target repository\ndef import_issues(state, issues):\n \n known_milestones = get_milestones(\"target\", state, target_url)\n def get_milestone_by_title(title):\n for milestone in known_milestones:\n if milestone['title'] == title : return milestone\n return None\n \n known_labels = get_labels(\"target\",target_url)\n def get_label_by_name(name):\n for label in known_labels:\n if label['name'] == name : return label\n return None\n \n new_issues = []\n closed_issues = []\n num_new_comments = 0\n new_milestones = []\n new_labels = []\n \n for issue in issues:\n new_issue = {}\n new_issue['title'] = issue['title']\n if config.getboolean('settings', 'import-comments') and 'comments' in issue and issue['comments'] != 0:\n num_new_comments += int(issue['comments'])\n new_issue['comments'] = get_comments_on_issue(\"source\",issue)\n \n if config.getboolean('settings', 'import-milestone') and 'milestone' in issue and 
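# get_issues above repeats the request-until-empty loop for each state; the
# pagination can be factored into one generator built on the send_request
# helper defined earlier. A sketch (the %d page placeholder mirrors the URLs
# used above):
def paginate(which, url_template):
    page = 1
    while True:
        batch = send_request(which, url_template % page)
        if not batch:
            return        # the API returns an empty list past the last page
        yield from batch
        page += 1

# e.g. issues = list(paginate("source",
#                             source_url + "/issues?state=open&page=%d"))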
issue['milestone'] is not None:\n # Since the milestones' ids are going to differ, we will compare them by title instead\n found_milestone = get_milestone_by_title(issue['milestone']['title'])\n if found_milestone:\n new_issue['milestone_object'] = found_milestone\n else:\n new_milestone = issue['milestone']\n new_issue['milestone_object'] = new_milestone\n known_milestones.append(new_milestone) # Allow it to be found next time\n new_milestones.append(new_milestone) # Put it in a queue to add it later\n \n if config.getboolean('settings', 'import-labels') and 'labels' in issue and issue['labels'] is not None:\n new_issue['label_objects'] = []\n for issue_label in issue['labels']:\n found_label = get_label_by_name(issue_label['name'])\n if found_label:\n new_issue['label_objects'].append(found_label)\n else:\n new_issue['label_objects'].append(issue_label)\n known_labels.append(issue_label) # Allow it to be found next time\n new_labels.append(issue_label) # Put it in a queue to add it later\n \n template_data = {}\n template_data['issue_creator_username'] = issue['user']['login']\n template_data['issue_creator_url'] = issue['user']['html_url']\n template_data['issue_date'] = format_date(issue['created_at'])\n template_data['issue_url'] = issue['html_url']\n template_data['issue_body'] = issue['body']\n \n if \"pull_request\" in issue and issue['pull_request']['html_url'] is not None:\n new_issue['body'] = format_pull_request(template_data)\n else:\n new_issue['body'] = format_issue(template_data)\n \n new_issues.append(new_issue)\n \n if (issue['state'] == \"closed\") :\n close_issue = {}\n close_issue['number'] = issue['number']\n close_issue['state'] = \"closed\"\n closed_issues.append(close_issue)\n \n print(\"You are about to add to '\" + config.get('target','repository') + \"':\")\n print(\" *\", len(new_issues), \"creating issues\") \n print(\" *\", len(closed_issues), \"closing issues\") \n print(\" *\", num_new_comments, \"new comments\") \n print(\" *\", len(new_milestones), \"new milestones\") \n print(\" *\", len(new_labels), \"new labels\") \n if not query.yes_no(\"Are you sure you wish to continue?\"):\n sys.exit()\n \n for milestone in new_milestones:\n result_milestone = import_milestone(state, milestone)\n milestone['number'] = result_milestone['number']\n milestone['url'] = result_milestone['url']\n \n for label in new_labels:\n result_label = import_label(label)\n \n result_issues = []\n for issue in new_issues:\n if 'milestone_object' in issue:\n issue['milestone'] = issue['milestone_object']['number']\n del issue['milestone_object']\n \n if 'label_objects' in issue:\n issue_labels = []\n for label in issue['label_objects']:\n issue_labels.append(label['name'])\n issue['labels'] = issue_labels\n del issue['label_objects']\n \n result_issue = send_request(\"target\",\"%s/issues\" % target_url, issue)\n print(\"Successfully created issue '%s'\" % result_issue['title'])\n \n if 'comments' in issue:\n result_comments = import_comments(issue['comments'], result_issue['number']) \n print(\" > Successfully added\", len(result_comments), \"comments.\")\n \n result_issues.append(result_issue)\n \n for issue in closed_issues:\n result_issue = send_request(\"target\",\"%s/issues/%d\" % (target_url,issue['number']), issue, 'PATCH')\n print(\"Successfully closed issue '%s'\" % result_issue['title'])\n \n #result_issues.append(result_issue)\n\n return result_issues\ndef import_some_issues(issue_ids):\n # Populate issues based on issue IDs\n issues = []\n for issue_id in issue_ids:\n 
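# import_issues above rescans known_milestones and known_labels linearly for
# every imported issue; indexing them once by title/name makes each lookup a
# dict hit. A small sketch of that indexing step (the dictionaries imitate the
# GitHub API objects handled above):
known_milestones = [{'title': 'v1.0', 'number': 1},
                    {'title': 'v2.0', 'number': 2}]
milestones_by_title = {m['title']: m for m in known_milestones}

found = milestones_by_title.get('v2.0')  # None when absent, like the helper
if found:
    print(found['number'])               # -> 2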
issues.append(get_issue_by_id(\"source\",source_url, int(issue_id)))\n \n return import_issues(issues)\n\ndef import_open_issues():\n # Populate issues based on issue IDs\n issues = []\n \n issues.append(get_issues(\"source\",\"open\",source_url))\n \n return import_issues(issues)\n\ndef import_closed_issues():\n # Populate issues based on issue IDs\n issues = []\n issues.append(get_issues(\"source\",\"closed\",source_url))\n \n return import_issues(issues)\n\ndef import_all_issues():\n issues = get_issues(\"source\",\"all\",source_url)\n return import_issues(\"all\",issues)\n\nif __name__ == '__main__':\n \n issue_ids = init_config()\n \n if (len(issue_ids) > 0):\n import_some_issues(issue_ids)\n else:\n import_all_issues()\n \n\n","sub_path":"gh-issues-import.py","file_name":"gh-issues-import.py","file_ext":"py","file_size_in_byte":18820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"171171696","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nShuan gameplay prototype core module\n\n(c) 2012 Opensource Game Studio Team (http://opengamestudio.org)\n'''\n\nfrom cocos import menu, layer, scene, scenes, text, sprite, euclid, cocosnode, batch\nimport math\nimport random\nimport copy\nfrom helpers import *\n\n'''\nCONSTANTS\n'''\nEMPTY = -1\nPROJECTILE = 0\nRAY = 1\nTURRET = 2\nSPAWN = 3\nAURA = 4\nEFFECT = 5 \n\nSLOTGUN = 0\nSLOTWEAPON = 1\nSLOTDEVICE = 2\nSLOTNONE = 3\n\nEGSIMPLE = 0\nEGNOSHIELDS = 1\nEGBOOSTSHIELDS = 2\n\n'''\nACTION CLASSES\n'''\nclass ActionDie(actions.InstantAction):\n def start(self):\n self.target.kill()\n\nclass ActionAim(actions.InstantAction):\n def init(self, selector=None):\n self.selector = selector\n def start(self):\n actor = self.target\n selector = self.selector\n target = None\n if selector == 'Avatar':\n target = currents['avatarObject']\n elif selector == 'AvatarTarget':\n target = currents['layerObject'].target\n elif selector == 'AnyFriend':\n target = random.choice(currents['layerObject'].avatarHelpers)\n elif selector == 'AnyEnemy':\n if currents['layerObject'].enemies:\n target = random.choice(currents['layerObject'].enemies)\n elif selector == 'Last':\n if actor._target:\n target = actor._target\n \n actor._target = target\n\nclass ActionShoot(ActionAim):\n def start(self):\n actor = self.target\n selector = self.selector\n target = None\n if selector == 'Avatar':\n target = currents['avatarObject']\n elif selector == 'AvatarTarget':\n target = currents['layerObject'].target\n elif selector == 'AnyFriend':\n target = random.choice(currents['layerObject'].avatarHelpers)\n elif selector == 'AnyEnemy':\n if currents['layerObject'].enemies:\n target = random.choice(currents['layerObject'].enemies)\n elif selector == 'Last':\n if actor._target:\n target = actor._target\n \n self.target.shoot(target)\n\nclass ActionStopShooting(actions.InstantAction):\n def start(self):\n self.target.stopShooting()\n\nclass ActionAimMovement(actions.InstantAction):\n def __init__(self, selector, speed, duration, bx1=0, by1=0, bx2=1, by2=1):\n super(ActionAimMovement, self).__init__()\n self.speed = speed\n self.duration = duration\n self.selector = selector\n self.bounds = (bx1, by1, bx2, by2)\n \n def start(self):\n actor = self.target\n \n selector = self.selector\n target = None\n if selector == 'Avatar':\n target = currents['avatarObject']\n elif selector == 'AvatarTarget':\n target = currents['layerObject'].target\n elif selector == 'AnyFriend':\n target = 
random.choice(currents['layerObject'].avatarHelpers)\n elif selector == 'AnyEnemy':\n if currents['layerObject'].enemies:\n target = random.choice(currents['layerObject'].enemies)\n elif selector == 'Last':\n if actor._target:\n target = actor._target\n \n if not target:\n actor.do(actions.Delay(self.duration))\n return\n \n destination = list(abs2rel(*target.position))\n if self.bounds:\n bounds = self.bounds \n destination[0] = max(destination[0], bounds[0])\n destination[1] = max(destination[1], bounds[1])\n destination[0] = min(destination[0], bounds[2])\n destination[1] = min(destination[1], bounds[3])\n \n destination = rel(*destination)\n \n deltaY = destination[1] - actor.position[1]\n deltaX = destination[0] - actor.position[0]\n dist = math.sqrt(deltaX**2 + deltaY**2)\n if self.speed:\n dur = dist/self.speed\n coeff = self.duration/dur\n else:\n coeff = 1\n actor.do(actions.MoveBy((deltaX*coeff, deltaY*coeff), duration = self.duration))\n\nclass ActionRandomMovement(actions.IntervalAction):\n def init(self, duration, bx1=0, by1=0, bx2=1, by2=1):\n self.duration = duration\n self.initial = None\n self.destination = None\n self.bounds = (bx1, by1, bx2, by2)\n\n def update(self, t):\n if self.initial == None:\n self.initial = self.target.position\n destination = [random.random(), random.random()]\n if self.bounds:\n bounds = self.bounds \n destination[0] = max(destination[0], bounds[0])\n destination[1] = max(destination[1], bounds[1])\n destination[0] = min(destination[0], bounds[2])\n destination[1] = min(destination[1], bounds[3])\n self.destination = rel(*destination)\n \n x = self.initial[0] + (self.destination[0] - self.initial[0]) * t\n y = self.initial[1] - (self.initial[1] - self.destination[1]) * t\n self.target.position = x,y\n\nclass ActionSwitchState(actions.InstantAction):\n def init(self, state):\n self.state = state\n \n def start(self):\n actor = self.target\n idx = self.state\n states = actor._kind.states\n if len(states) >= idx:\n acts = states[idx - 1]\n else:\n acte = actor.actions\n actor.stop()\n actor.actions = acts\n actor.do(acts)\n\nclass ActionSwitchWeapons(actions.InstantAction):\n def init(self, set):\n self.set = set\n \n def start(self):\n actor = self.target\n idx = self.set\n if len(actor.sets) >= idx:\n weapons = actor.set[idx - 1]\n else:\n weapons = actor.weapons\n actor.stopShooting()\n actor.weapons = weapons\n\nclass ActionMoveTo(actions.MoveTo):\n def __init__(self, x, y, duration, randomOffsetX=0, randomOffsetY=0):\n x, y = rel(x, y)\n randomOffsetX, randomOffsetY = rel(randomOffsetX, randomOffsetY)\n nx, ny = x + int((random.random() - 0.5)*randomOffsetX), y + int((random.random() - 0.5)*randomOffsetY), \n super(ActionMoveTo, self).__init__((nx, -ny), duration=duration)\n\nclass ActionMoveBy(actions.MoveBy):\n def __init__(self, x, y, duration, randomOffsetX=0, randomOffsetY=0):\n x, y = rel(x, y)\n randomOffsetX, randomOffsetY = rel(randomOffsetX, randomOffsetY)\n nx, ny = x + int((random.random() - 0.5)*randomOffsetX), y + int((random.random() - 0.5)*randomOffsetY),\n super(ActionMoveBy, self).__init__((nx, ny), duration=duration)\n\nclass ActionFadeTimescale(actions.IntervalAction ):\n '''\n WARNING: Use it for ASprite and it's subclasses only\n '''\n def init( self, ts, duration ):\n self.duration = duration\n self.originalTs = None\n self.ts = ts\n\n def update( self, t ):\n if self.originalTs == None:\n self.originalTs = self.target.timeScale\n ts = self.originalTs + (self.ts - self.originalTs) * t\n 
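# ActionAim, ActionShoot and ActionAimMovement above duplicate the same
# selector if/elif chain; a lookup table of selector name -> callable keeps it
# in one place. A sketch assuming the module's `currents` registry and its
# `random` import ('Last' still needs the actor, so it stays a special case):
SELECTORS = {
    'Avatar': lambda: currents['avatarObject'],
    'AvatarTarget': lambda: currents['layerObject'].target,
    'AnyFriend': lambda: random.choice(currents['layerObject'].avatarHelpers),
    'AnyEnemy': lambda: (random.choice(currents['layerObject'].enemies)
                         if currents['layerObject'].enemies else None),
}

def resolve_target(actor, selector):
    if selector == 'Last':
        return actor._target
    picker = SELECTORS.get(selector)
    return picker() if picker else None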
self.target.setTimeScale(ts)\n\n'''\nLIBRARY ENTRIES\n'''\nadata['aDie'] = ActionDie()\n\n'''\nMAIN KINDS\n'''\nclass DeviceKind(object):\n position = 0, 9\n damage = 2\n energy = 20\n energyIdle = 5 \n damageToShieldsMod = 1\n ammo = 0\n isGood = True\n name = \"Unknown device\"\n image = \"\"\n \n type = PROJECTILE\n # Projectile and turret params\n velocity = 0, 1600\n lifetime = 0.5\n pof = 1\n \n # Turret params\n rotation = False\n keepTarget = False\n \n # Projectile params\n directions = 0\n angle = 0\n spread = 0\n oneByOne = False\n \n # Ray params\n anchor = 0, 0\n \n # Spawn params\n spawnID = ''\n \n # Effect and Aura params\n runner = None\n \n # Sound\n startSound = None\n loopSound = None\n endSound = None\n soundVolume = 0.5\n \n def __init__(self, dx=0, dy=0):\n super(DeviceKind, self).__init__()\n self.position = self.position[0] + dx, self.position[1] + dy \n if not self.startSound is None:\n self.startSound = loadSound(self.startSound, self.soundVolume)\n if not self.endSound is None:\n self.endSound = loadSound(self.endSound, self.soundVolume)\n if not self.loopSound is None:\n self.loopSound = loadSound(self.loopSound, self.soundVolume)\n \n if self.ammo == 0:\n self.infinite = True\n else:\n self.infinite = False\n \n if self.oneByOne:\n self.tick = 0\n self.amod = 1\n\nclass AvatarKind(object):\n image = loadAnimation('data/graphics/avatarShip.png', 3, 1, 0.1, True)\n life = 100\n engine = 1\n weapons = ()\n weaponSlots = ()\n deviceSlots = (1, 2, 3)\n name = 'Avatar'\n \n def __init__(self):\n super(AvatarKind, self).__init__()\n\nclass NPCKind(object):\n image = loadAnimation('data/graphics/enemy1.png', 2, 1, 0.5, True)\n life = 10\n shields = 0\n shieldsRegen = 0\n damage = 10\n score = 1\n brains = tuple()\n sets = tuple()\n weapons = tuple()\n \n def __init__(self):\n super(NPCKind, self).__init__()\n \n states = []\n for i in self.brains:\n l = loadScript(i)\n blocks = [[]]\n for j in l:\n if j[0] == 'New':\n if blocks[-1]:\n blocks.append([j])\n elif j[0] == 'Repeat':\n if blocks[-1]:\n if blocks[-1][-1][0] == 'New':\n blocks[-1].append(j)\n else:\n blocks.append([j])\n else:\n blocks[-1].append(j)\n else:\n blocks[-1].append(j)\n states.append(self.translate(blocks))\n self.states = states\n \n if states:\n if states[0]:\n self.actions = states[0]\n \n def translate(self, blocks):\n commands = {\n 'SelectTarget': ActionAim,\n 'AimMove': ActionAimMovement,\n 'Wait': actions.Delay,\n 'Die': ActionDie,\n 'MoveBy': ActionMoveBy,\n 'MoveTo': ActionMoveTo,\n 'RandomDelay': actions.RandomDelay,\n 'RandomMovement': ActionRandomMovement,\n 'Shoot': ActionShoot,\n 'StopShooting': ActionStopShooting,\n 'SwitchStates': ActionSwitchState,\n 'SwitchWeapons': ActionSwitchWeapons,\n }\n \n isLoop = False\n isNew = False \n acts = None\n \n for block in blocks:\n if block[0][0] == 'New':\n isNew = True\n del block[0]\n else:\n isNew = False\n if block:\n if block[0][0] == 'Repeat':\n isLoop = True\n del block[0]\n else:\n isLoop = False\n \n if block:\n cmd = block[0]\n current_acts = commands[cmd[0]](*cmd[1:])\n del block[0]\n \n for cmd in block:\n current_acts += commands[cmd[0]](*cmd[1:])\n \n if isLoop:\n current_acts = actions.Repeat(current_acts)\n \n if acts and not isNew:\n acts += current_acts\n if acts and isNew:\n acts = acts | current_acts\n else:\n acts = current_acts\n return acts\n\nclass EffectKind(object):\n name = 'Null'\n duration = 0\n group = EGSIMPLE\n \n def start(self, instance):\n pass\n \n def effect(self, target):\n pass\n \n def check(self, 
target):\n return True\n \n def end(self, target):\n pass\n\n'''\nEFFECTS\n'''\nclass RechargerKind(EffectKind):\n name = \"Recharge\"\n duration = 2\n \n def start(self, instance):\n target = instance.target\n for i in target.runners:\n if i.group == EGNOSHIELDS:\n i.timeToDie = True\n target.playShield(1)\n \n def check(self, instance):\n if instance.target.absorbedDamage == 0:\n return False\n else:\n return True\n \n def effect(self, target):\n if target.absorbedDamage > 0:\n target.absorbedDamage -= 10\n\nclass ShieldOverloadKind(EffectKind):\n name = 'Shields overloaded'\n group = EGNOSHIELDS\n \n def start(self, instance):\n target = instance.target\n instance.duration = target.shields / target.shieldsRegen\n target.playShield(-1)\n \n def effect(self, target):\n target.absorbedDamage = target.shields\n \n def end(self, instance):\n if not instance.timeToDie:\n target = instance.target\n target.absorbedDamage = target.shields * 3 / 4\n target.playShield(1)\n\nclass DefenderKind(EffectKind):\n name = \"Defended\"\n distance = 200\n \n def start(self, instance):\n target = instance.target\n target.shields += 80\n target.shieldsRegen += 8\n target.playShield(1)\n \n def check(self, instance):\n if instance.source._gonnaDie:\n return False\n s = instance.source.position\n t = instance.target.position\n return (s[0] - t[0])**2 + (s[1] - t[1])**2 <= self.distance**2 \n \n def end(self, instance):\n target = instance.target\n target.shields -= 80\n target.shieldsRegen -= 8 \n if target.shields == 0:\n target.playShield(-1)\n\neffectsData['eOverload'] = ShieldOverloadKind()\neffectsData['eRecharge'] = RechargerKind()\neffectsData['eDefend'] = DefenderKind()\n\n'''\nMAIN GAME ELEMENTS\n'''\nclass ASprite(sprite.Sprite):\n class ActionScalableInterval(actions.Action):\n def init(self, one, ts=1):\n self.one = one\n self.timeScale = ts\n \n def start(self):\n self.current_action = copy.deepcopy(self.one)\n self.current_action.target = self.target\n self.current_action.start()\n \n def step(self, dt):\n self._elapsed += dt*self.timeScale\n self.current_action.step(dt*self.timeScale)\n if self.current_action.done():\n self.current_action.stop()\n self._done = True\n \n def stop(self):\n if not self._done:\n self.current_action.stop()\n \n def setTimeScale(self, ts):\n self.timeScale = ts\n \n def __init__(self, *args):\n super(ASprite, self).__init__(*args)\n self.timeScale = 1\n \n def do(self, action):\n new = self.ActionScalableInterval(action, self.timeScale)\n super(ASprite, self).do(new)\n \n def doUnscaled(self, action):\n super(ASprite, self).do(action)\n \n def setTimeScale(self, ts):\n self.timeScale = ts\n for j in self.actions:\n if issubclass(j.__class__, self.ActionScalableInterval):\n j.setTimeScale(ts)\n\nclass EffectRunner(batch.BatchableNode):\n \n def __init__(self, kind, target, source=None):\n super(EffectRunner, self).__init__()\n self.target = target\n self.name = kind.name\n self.group = kind.group\n self.duration = kind.duration\n if source == None:\n self.source = target\n else:\n self.source = source\n kind.start(self)\n \n if self.duration == 0:\n self.constant = True\n else:\n self.constant = False\n \n self.effect = kind.effect\n self.check = kind.check\n self.end = kind.end\n target.runners.append(self)\n self.schedule_interval(self.update, 1)\n target.add(self)\n self.timeToDie = False\n \n def set_batch(self, batch, groups=None, z=0):\n pass\n \n def update(self, *args):\n l = self.target.runners\n if (not self.check(self)) or self.timeToDie:\n self.end(self)\n 
if self in l:\n l.remove(self)\n self.kill()\n elif not self.constant:\n self.duration -= 1\n if self.duration == 0:\n self.end(self)\n if self in l:\n l.remove(self)\n self.kill()\n\nclass Bullet(ASprite):\n def __init__(self, owner, kind, target=None, angle=0):\n super(Bullet, self).__init__(kind.image)\n self.position = owner.position[0] + kind.position[0], owner.position[1] + kind.position[1]\n self.damage = kind.damage\n if angle == 0:\n self.velocity = kind.velocity\n self.rotation = 0\n else:\n speed = kind.velocity[1]\n a = (90 - angle)/57.3\n self.velocity = speed * math.cos(a), speed * math.sin(a)\n self.rotation = angle\n self.isGood = kind.isGood\n self._needRotate = kind.rotation\n self._kind = kind\n self._speed = kind.velocity[1]\n self.damageToShieldMod = kind.damageToShieldMod\n\n lifetime = kind.lifetime\n \n if self.isGood:\n currents['layerObject'].avatarBullets.append(self)\n else:\n currents['layerObject'].enemyBullets.append(self)\n \n if not target is None:\n self.aim(target)\n if kind.keepTarget:\n self.target = target\n self.schedule_interval(self.reAim, 0.1)\n \n used = bulletsUsed.get(kind, [])\n used.append(self)\n bulletsUsed[kind] = used\n \n currents['layerObject'].add(self, z=5)\n self._actions = adata['aMove'] | actions.Delay(lifetime) + adata['aDie']\n self.do(self._actions)\n \n def aim(self, target=None):\n speed = abs(self._kind.velocity[1])\n angle = math.atan2(target.position[1] - self.position[1], target.position[0] - self.position[0])\n dy = speed * math.sin(angle)\n dx = speed * math.cos(angle)\n self.velocity = dx, dy\n if self._needRotate:\n self.rotation = int(90 - angle*57.3)\n \n def reAim(self, *args):\n if self.target == None or self.target._gonnaDie:\n self.unschedule(self.reAim)\n return\n else:\n speed = self._speed\n target = self.target\n angle = math.atan2(target.position[1] - self.position[1], target.position[0] - self.position[0])\n dy = speed * math.sin(angle)\n dx = speed * math.cos(angle)\n self.velocity = dx, dy\n if self._needRotate:\n self.rotation = int(90 - angle*57.3)\n \n def kill(self):\n if self.isGood:\n currents['layerObject'].avatarBullets.remove(self)\n else:\n currents['layerObject'].enemyBullets.remove(self)\n if self._kind.keepTarget:\n self.unschedule(self.reAim)\n \n kind = self._kind\n bulletsUsed[kind].remove(self)\n free = bulletsFree.get(kind, [])\n free.append(self)\n bulletsFree[kind] = free\n currents['layerObject'].remove(self)\n self.stop()\n \n def reinstate(self, owner, target=None, angle=0):\n kind = self._kind\n self.position = owner.position[0] + kind.position[0], owner.position[1] + kind.position[1]\n if angle == 0:\n self.velocity = kind.velocity\n self.rotation = 0\n else:\n speed = kind.velocity[1]\n a = (90 - angle)/57.3\n self.velocity = speed * math.cos(a), speed * math.sin(a)\n self.rotation = angle\n \n if self.isGood:\n currents['layerObject'].avatarBullets.append(self)\n else:\n currents['layerObject'].enemyBullets.append(self)\n \n if not target is None:\n self.aim(target)\n if kind.keepTarget:\n self.target = target\n self.schedule_interval(self.reAim, 0.1)\n \n bulletsFree[kind].remove(self)\n bulletsUsed[kind].append(self)\n \n currents['layerObject'].add(self, z=5)\n self.do(self._actions)\n\nclass Ray(ASprite):\n def __init__(self, owner, kind):\n super(Ray, self).__init__(kind.image)\n self.image_anchor = kind.anchor\n self._kind = kind\n self.rotation = kind.rayRotation\n self.offset = (kind.position[0], kind.position[1])\n self.position = owner.position[0] + kind.position[0], 
owner.position[1] + kind.position[1]\n self.damage = kind.damage\n self.isGood = kind.isGood\n self.layer = owner.owner\n self.owner = owner\n self.timeScale = owner.timeScale\n if self.isGood:\n self.layer.avatarRay.append(self)\n else:\n self.layer.enemyRay.append(self)\n self.damageToShieldMod = kind.damageToShieldMod\n \n self.layer.add(self, z=1)\n \n def kill(self):\n if self.isGood:\n self.layer.avatarRay.remove(self)\n else:\n self.layer.enemyRay.remove(self)\n self.owner.rays.remove(self)\n super(Ray, self).kill()\n\nclass Avatar(ASprite):\n\n \n def __init__(self, owner, kind):\n self.settings = Settings()\n super(Avatar, self).__init__(kind.image)\n self.owner = owner\n self.life = kind.life\n self._kind = kind\n self.shields = 0\n self.shieldsRegen = 0\n self.absorbedDamage = 0.0\n self.takenDamage = 0\n self.weapons = tuple()\n self.devices = tuple()\n self._wSlots = kind.weaponSlots\n self._dSlots = kind.deviceSlots\n self.engine = kind.engine\n self.consume = 0\n self.hp = self.life\n self.sp = self.shields\n self.runners = []\n self.schedule_interval(self.regen, 0.1)\n self._gonnaDie = False\n self.damage = self.life\n self.damageToShieldMod = 1\n \n def setup(self, gunsList, weaponsList, devicesList, shieldsList, enginesList, reactorList):\n settings = self.settings\n self.shields = shieldsList[settings.avatarShields][1]\n self.shieldsRegen = shieldsList[settings.avatarShields][2]\n self.engine = enginesList[settings.avatarEngine][1]\n self.reactor = reactorList[settings.avatarReactor][1]\n self.consume = enginesList[settings.avatarEngine][2] + shieldsList[settings.avatarShields][3]\n weapons = []\n if len(self._wSlots) >= 1:\n weapons.append(gunsList[settings.avatarGun](self._wSlots[0]))\n self.consume += gunsList[settings.avatarGun].energyIdle\n if len(self._wSlots) >= 2:\n weapons.append(gunsList[settings.avatarGun](self._wSlots[1]))\n weapons[-1].amod = -1\n self.consume += gunsList[settings.avatarGun].energyIdle\n for i in self.settings.avatarWeapons:\n if len(weapons) < len(self._wSlots):\n weapons.append(weaponsList[i](self._wSlots[len(weapons)]))\n self.consume += weaponsList[i].energyIdle\n self.weapons = tuple(weapons)\n devices = []\n for i in self.settings.avatarDevices:\n if len(devices) < len(self._dSlots):\n devices.append(devicesList[i](self._dSlots[len(devices)]))\n self.consume += devicesList[i].energyIdle\n self.devices = tuple(devices)\n \n \n def takeDamage(self, source):\n damage = source.damage\n damageToShieldMod = source.damageToShieldMod\n if self.shields - self.absorbedDamage > 0:\n self.absorbedDamage += damage * damageToShieldMod\n if self.absorbedDamage > self.shields:\n self.takenDamage += (self.absorbedDamage - self.shields) / float(damageToShieldMod)\n self.absorbedDamage = self.shields\n EffectRunner(effectsData['eOverload'], self)\n else:\n self.playShield()\n else:\n self.takenDamage += damage\n \n if self.takenDamage > self.life:\n self.owner.killAvatar()\n \n self.hp = self.life - self.takenDamage\n self.sp = self.shields - self.absorbedDamage\n log(self._kind.idString, ' takes ', damage, ' dmg from ', source._kind.idString)\n \n def regen(self, *args):\n modShields = 1.0\n modSpeed = 1.0\n \n rc = self.consume * 100 / self.reactor\n \n if rc < 80:\n modShields = modShields * 80 / rc\n modSpeed = modSpeed * 80 / rc\n elif rc < 100:\n pass\n else:\n modShields = modShields * 80 / rc\n modSpeed = modSpeed * 80 / rc\n \n if self.absorbedDamage > 0:\n self.absorbedDamage -= self.shieldsRegen*modShields/10\n if self.absorbedDamage < 0:\n 
self.absorbedDamage = 0\n \n for r in self.runners:\n r.effect(self)\n \n self.sp = self.shields - self.absorbedDamage\n self.setTimeScale(modSpeed)\n \n def playShield(self, idx=0):\n def die(object):\n object.kill()\n if idx == 0:\n shield = sprite.Sprite(loadAnimation('data/graphics/ShieldAvatar.png', 4, 1, 0.05))\n elif idx > 0:\n shield = sprite.Sprite(loadAnimation('data/graphics/ShieldAvatarRevived.png', 4, 1, 0.05))\n elif idx < 0:\n shield = sprite.Sprite(loadAnimation('data/graphics/ShieldAvatarBlocked.png', 4, 1, 0.05))\n self.add(shield)\n shield.do(adata['aDelay03'] + actions.CallFuncS(die))\n \n def kill(self):\n if not self._gonnaDie:\n self._gonnaDie = True\n self.stop()\n super(Avatar, self).kill()\n\nclass NPCShip(ASprite):\n def __init__(self, owner, kind, x, y, coordZ=4):\n super(NPCShip, self).__init__(kind.image)\n self.owner = owner\n self.life = kind.life\n self.shields = kind.shields\n self.shieldsRegen = kind.shieldsRegen\n self.absorbedDamage = 0.0\n self.takenDamage = 0\n self.damage = kind.damage\n self.damageToShieldMod = 1\n self.score = kind.score\n self.weapons = kind.weapons\n self.settings = Settings()\n self.rays = []\n self.position = rel(x,y)\n self.soundList = []\n self.runners = []\n self.aura = None \n self._gonnaDie = False\n self._shieldSize = kind.image.get_max_height() / 36.0\n self._auraCache = []\n self._kind = kind\n self._target = None\n self.velocity = 0, 0\n self.lifeMeter = None\n used = shipsUsed.get(kind, [])\n used.append(self)\n shipsUsed[kind] = used\n self.schedule_interval(self.regen, 1)\n self.do(kind.actions)\n owner.add(self, z=coordZ)\n \n def takeDamage(self, source):\n damage = source.damage\n damageToShieldMod = source.damageToShieldMod\n if self.shields - self.absorbedDamage > 0:\n self.absorbedDamage += damage * damageToShieldMod\n if self.absorbedDamage > self.shields:\n self.takenDamage += (self.absorbedDamage - self.shields) / float(damageToShieldMod)\n self.absorbedDamage = self.shields\n EffectRunner(effectsData['eOverload'], self)\n else:\n self.playShield()\n else:\n self.takenDamage += damage\n \n if self.takenDamage > self.life:\n self.owner.addExplosion(self.position)\n self.owner.score += self.score\n self.kill()\n if self.lifeMeter:\n self.lifeMeter.kill()\n self.lifeMeter = None\n log(self._kind.idString, ' takes ', damage, ' dmg from ', source._kind.idString)\n \n \n def shoot(self, target=None):\n if len(self.rays) > 0:\n laser = False\n else:\n laser = True\n for w in self.weapons:\n if w.type == PROJECTILE or w.type == TURRET:\n free = bulletsFree.get(w, []) \n if target:\n if free:\n free[0].reinstate(self, target)\n else:\n Bullet(self, w, target)\n else:\n if free:\n free[0].reinstate(self)\n else:\n Bullet(self, w)\n elif laser and w.type == RAY:\n self.rays.append(Ray(self, w))\n elif w.type == AURA:\n self.aura = w.runner\n elif w.type == SPAWN:\n pos = abs2rel(*self.position)\n Enemy(self.owner, enemies[w.spawnID], pos[0], pos[1])\n if self.settings.sound:\n if not w.startSound is None:\n w.startSound.play()\n if not w.loopSound is None:\n if not w.loopSound in self.soundList:\n w.loopSound.play(-1)\n self.soundList.append(w.loopSound)\n \n def stopShooting(self):\n for i in self.rays:\n i.kill()\n self.aura = None\n for i in self.soundList:\n i.stop()\n del self.soundList[:]\n \n def kill(self):\n if not self._gonnaDie:\n layer = currents['layerObject']\n self._gonnaDie = True\n if self.good:\n layer.avatarHelpers.remove(self)\n else:\n layer.enemies.remove(self)\n if self.owner.target == self:\n 
self.owner.target = None\n            kind = self._kind\n            shipsUsed[kind].remove(self)\n            free = shipsFree.get(kind, [])\n            free.append(self)\n            shipsFree[kind] = free\n            self.unschedule(self.regen)\n            layer.remove(self)\n            self.stop()\n    \n    def reinstate(self, x, y, target=None, coordZ=4):\n        layer = currents['layerObject']\n        kind = self._kind\n        self.absorbedDamage = 0.0\n        self.takenDamage = 0\n        self.weapons = kind.weapons\n        self.settings = Settings()\n        self.rays = []\n        self.position = rel(x,y)\n        self.target = target\n        self.soundList = []\n        self.runners = []\n        self.aura = None\n        self.schedule_interval(self.regen, 1)\n        self.do(kind.actions)\n        self._gonnaDie = False\n        self._auraCache = []\n        self.velocity = 0, 0\n        layer.add(self, z=coordZ)\n        if self.good:\n            layer.avatarHelpers.append(self)\n        else:\n            layer.enemies.append(self)\n        \n        shipsFree[kind].remove(self)\n        shipsUsed[kind].append(self)\n    \n    def disarm(self):\n        self.weapons = tuple()\n    \n    def regen(self, *args):\n        if self.absorbedDamage > 0:\n            self.absorbedDamage -= self.shieldsRegen\n            if self.absorbedDamage < 0:\n                self.absorbedDamage = 0\n        \n        for r in self.runners:\n            r.effect(self)\n        \n        if self.aura:\n            aura = self.aura\n            for e in currents['layerObject'].enemies:\n                p = self.position\n                ep = e.position\n                # squared Euclidean distance against the squared aura radius\n                if (p[0] - ep[0]) ** 2 + (p[1] - ep[1]) ** 2 <= aura.distance**2:\n                    if e not in self._auraCache:\n                        self._auraCache.append(e)\n                        EffectRunner(aura, e, self)\n                elif e in self._auraCache:\n                    self._auraCache.remove(e)\n    \n    def playShield(self, idx=0):\n        def die(object):\n            object.kill()\n        if idx == 0:\n            shield = sprite.Sprite(loadAnimation('data/graphics/ShieldEnemy.png', 4, 1, 0.05))\n        elif idx > 0:\n            shield = sprite.Sprite(loadAnimation('data/graphics/ShieldEnemyRevived.png', 4, 1, 0.05))\n        elif idx < 0:\n            shield = sprite.Sprite(loadAnimation('data/graphics/ShieldEnemyBlocked.png', 4, 1, 0.05))\n        shield.scale = self._shieldSize\n        self.add(shield)\n        shield.do(adata['aDelay03'] + actions.CallFuncS(die))\n\nclass Enemy(NPCShip):\n    def __init__(self, owner, kind, x, y):\n        super(Enemy, self).__init__(owner, kind, x, y)\n        owner.enemies.append(self)\n        self.good = False\n\nclass Helper(NPCShip):\n    def __init__(self, owner, kind, x, y, target=None):\n        super(Helper, self).__init__(owner, kind, x, y, 8)\n        owner.avatarHelpers.append(self)\n        self.good = True","sub_path":"shuan_work/Prototype/modules/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":32865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"361773445","text":"from FourierWindow import *\n\n\nclass FSeriesWindow(FourierWindow):\n    \n    def __init__(self, root):\n        self.title = 'Fourier Series'\n        \n        self.maxTerms=100\n        \n        self.signalType = 0\n        \n        FourierWindow.__init__(self, root)\n        \n    \n    ############################################################################ \n    # Contains the different options for the signals, using checkboxes\n    #\n    ############################################################################ \n    def makeLeftPane(self):\n        self.dic = {'sqrt(sin(x))': (lambda x: np.where(np.sin(2*pi*x)>0, np.sqrt(np.sin(2*pi*x)), 0.)), \n            'f2' : (lambda x: np.where(signal.sawtooth(2*pi*x)>0, signal.sawtooth(2*pi*x), -1)),\n            'Square' : (lambda x: signal.square(2*pi*x)), 'Sawtooth' : (lambda x: signal.sawtooth(2*pi*x))}\n        \n        varTitles = ['Function',\"Gibbs Effect Correction\"]\n        varDTypes = [StringVar, BooleanVar]\n        varDefaults = [self.dic.keys()[0], False]\n        varTexts = [self.dic.keys(),['None', 'Yes']]\n        varVals = 
[self.dic.keys(), [False,True]]\n\n optionsSpecs = [varTitles, varDTypes, varDefaults, varTexts, varVals]\n \n self._makeLeftPane(optionsSpecs)\n \n self.funcText = self.options[0]\n self.gibbs = self.options[1]\n \n ############################################################################ \n # Contains the plots and frequency sliders at the bottom\n #\n ############################################################################ \n def makeRightPane(self):\n varNames = ['Num. Terms']\n varLimits = [(0,self.maxTerms)]\n varRes = [1]\n varDTypes = [IntVar]\n varDefaults = [1]\n varValues = [varNames, varLimits, varRes, varDTypes, varDefaults]\n \n self._makeRightPane((2,2), [varValues])\n \n self.numTerms = self.vars[0][0]\n \n ############################################################################ \n # Initializes the signals in the plots\n #\n ############################################################################ \n def initSignals(self):\n self._initSignals()\n \n def cn(self, x, y, n, period):\n c = y * np.exp(-1j * 2. * np.pi * n * x / period)\n return c.sum()/c.size\n \n def fSeries(self, x, y, Nh, period):\n rng = np.arange(0., Nh)\n coeffs = np.array([self.cn(x,y,i,period) for i in rng])\n if self.gibbs.get():\n f = np.array([(2. if i>0 else 1.) * coeffs[i] * np.sinc(i*np.pi/(2*Nh)) * np.exp(1j*2*i*np.pi*x/period) for i in rng])\n else:\n f = np.array([(2. if i>0 else 1.) * coeffs[i] * np.exp(1j*2*i*np.pi*x/period) for i in rng])\n return coeffs, f.sum(axis=0)\n ############################################################################ \n # Updates the plots when anything is changed\n #\n ############################################################################ \n #TODO keep variable of FFT of each level so don't have to compute each time\n def updatePlots(self):\n funcText = self.funcText.get()\n func = self.dic[funcText]\n \n dt = 4./1024\n t = np.linspace(-2,2-dt,1024)\n y = func(t)\n #print sum(y)/len(t), t[0], t[-1]\n n = self.numTerms.get()\n \n coeffs, approx = self.fSeries(t,y,n,1.)\n \n self.axes[2].cla()\n self.axes[2].grid()\n self.axes[3].cla()\n self.axes[3].grid()\n \n self.lines[0].set_data(t,y)\n self.lines[1].set_data(t,approx)\n self.axes[2].stem(coeffs.real,basefmt='k:')\n self.axes[3].stem(-coeffs.imag,basefmt='k:')\n\n self.formatAxes(self.axes[0],t,y,'Time (ms)','Amplitude',funcText)\n self.formatAxes(self.axes[1],t,approx,'Time (ms)','Amplitude','Approximation of '+funcText)\n self.formatAxes(self.axes[2],range(-1,n+1),coeffs.real,'Frequency (kHz)','Coefficient','Cosine Coefficients')\n self.formatAxes(self.axes[3],range(-1,n+1),-coeffs.imag,'Frequency (kHz)','Coefficient','Sine Coefficients')\n \n if max(coeffs.real) < 0: self.axes[2].set_ylim([self.axes[2].get_ylim()[0], 0])\n if min(coeffs.real) > 0: self.axes[2].set_ylim([0, self.axes[2].get_ylim()[1]])\n if max(-coeffs.imag) < 0: self.axes[3].set_ylim([self.axes[3].get_ylim()[0], 0])\n if min(-coeffs.imag) > 0: self.axes[3].set_ylim([0, self.axes[3].get_ylim()[1]])\n \n [ax.axhline(color='k') for ax in self.axes]\n #for fig in self.figs:\n self.fig.canvas.draw_idle()\n self.fig.tight_layout()\n #fig.tight_layout()\n \nif __name__ == \"__main__\":\n root = Tk()\n FSeriesWindow(root)\n \n if os.name == \"nt\": root.wm_state('zoomed')\n else: root.attributes('-zoomed', True)\n\n root.mainloop() \n 
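\n# A minimal standalone sanity check of the coefficient formula used by cn() above:\n# c_n is approximated by the mean of y * exp(-1j*2*pi*n*x/period) over one period.\n# This is only a sketch: it assumes np and signal are available through the star\n# import from FourierWindow, and the helper name below is illustrative.\ndef _square_wave_coeff(n=1, period=1.0, samples=1024):\n    x = np.linspace(0.0, period, samples, endpoint=False)\n    y = signal.square(2*np.pi*x/period)\n    c = (y * np.exp(-1j*2*np.pi*n*x/period)).mean()\n    return c # abs(2*c) should be ~4/(n*pi) for odd n and ~0 for even n\n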
\n","sub_path":"FSeriesWindow.py","file_name":"FSeriesWindow.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"279870731","text":"# -*- coding:utf-8 -*-\n\nimport os\nimport shutil\nimport string\n\n\ndef del_files(dir, topdown=True):\n for root, dirs, files in os.walk(dir, topdown):\n for name in files:\n pathname = os.path.splitext(os.path.join(root, name))\n if (pathname[1] == \".out\" or pathname[1] == '.o'):\n os.remove(os.path.join(root, name))\n\n\ndef clean():\n if os.path.exists('./test/CMakefiles'):\n shutil.rmtree('./test/CMakefiles')\n if os.path.exists('./build'):\n shutil.rmtree('./build')\n if os.path.exists('./install-dir'):\n shutil.rmtree('./install-dir')\n del_files('./test')\n if os.path.exists('./test/CMakeCache.txt'):\n os.remove('./test/CMakeCache.txt')\n\n\nif __name__ == '__main__':\n clean()\n","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"366869609","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nimport pandas as pd\nimport sys\n\ndriver = webdriver.Chrome(\"본인 웹드라이버 저장 주소\")\ndriver.get('http://ssullog.joins.com/speech/speechList')\n\n#크롤링할 페이지로 들어가기\n#태그는 페이지 열 때마다 새로 설정해두어야 한다.\nsample = driver.find_element_by_css_selector('#speech_no_24713 > div.box-header > div > a')\nsample.send_keys('\\n')\ntime.sleep(5)\n\n#크롤링 해오기\nhtml = driver.page_source\nsoup = BeautifulSoup(html, \"html.parsar\")\n\nnotices = soup.select(\"pop_search > div.db > div.box01\")\n\nfor n in notices:\n\tprint(n.text.strip())\n\tcontents = n.text.strip() #필요한 텍스트를 contents에 저장 \n\n\n#페이지 닫기\ncloser = driver.find_element_by_id('btn_layer_speechall')\ncloser.send_keys('\\n')\n\n\n#텍스트를 파일로 저장하기\nsys.stdout = open('output.txt','w')\nprint(contents) #print()안의 내용이 output.txt 파일에 저장됨.\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"411005661","text":"import numpy as np\r\nimport math as m\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nimport unittest\r\n\r\n\"\"\"QUESTION 3\"\"\"\r\n\"\"\"Resolution of the linear system Ax=b by the conjugate gradient method where A is a symmetric positive definite matrix and b a vector (without preconditioning)\"\"\"\r\n\r\n\r\ndef conjgrad(A,b,x) :\r\n r=b-np.dot(A,x)\r\n p=r\r\n rsOld= float(np.dot(np.transpose(r),r))\r\n tab_x=[]\r\n iter=[]\r\n for i in range(1,100001):\r\n Ap=np.dot(A,p)\r\n n = float(np.dot(np.transpose(p),Ap))\r\n alpha=rsOld/n\r\n x=x+ alpha*p\r\n r=r- alpha*Ap\r\n rsNew=float(np.dot(np.transpose(r),r))\r\n tab_x+=[x]\r\n iter+=[i]\r\n if m.sqrt(rsNew) < 1e-10 :\r\n max_iteration=i\r\n break\r\n p=r+rsNew/rsOld*p\r\n rsOld=rsNew\r\n #print('number of iteration needed to find X',max_iteration) \r\n return x#x is the solution\r\n #tab_x is a list regrouping the values of x at each iteration\r\n #iter is a list regrouping the iterations before arriving at the final solution. 
\r\n\"\"\"QUESTION 4\"\"\"\r\n\r\ndef somme(T, i, j):\r\n return sum(T[i][k] * T[j][k] for k in range(j))\r\n\r\ndef facto_dense_inc(A):\r\n (n, n1) = A.shape\r\n T = np.zeros((n,n))\r\n for i in range(n):\r\n for j in range(i + 1):\r\n if A[i][j] != 0:\r\n if i==j:\r\n T[i][j] = m.sqrt((A[i][i] - somme(T, i, j)))\r\n else:\r\n T[i][j] = (A[i][j] - somme(T, i, j)) / T[j][j]\r\n return T\r\n\r\n\r\ndef preconditioner(A):\r\n T= facto_dense_inc(A)\r\n return np.dot(T,np.transpose(T))\r\n\r\ndef PreconditionedConjgrad(A,b,x):\r\n r=b-np.dot(A,x)\r\n M=preconditioner(A)\r\n z=np.dot(np.linalg.inv(M),r)\r\n p=z\r\n for k in range(1,100001):\r\n alpha=(np.dot(np.transpose(r),z))/(np.dot(np.transpose(p),np.dot(A,p)))\r\n x=x+ alpha*p\r\n r2=r-alpha*(np.dot(A,p))\r\n rsNew=float(np.dot(np.transpose(r2),r2))\r\n if m.sqrt(rsNew) < 1e-10 :\r\n max_iteration=k\r\n break\r\n z2=np.dot(np.linalg.inv(M),r2) \r\n b=(np.dot(np.transpose(z2),r2))/(np.dot(np.transpose(z),r))\r\n z=z2\r\n r=r2\r\n print('number of iteration needed to find X',max_iteration) \r\n return x\r\n\r\n##****************TEST: Conjugate gradient and Preconditionned Conjugate gradient methods \r\nclass Test_gradient(unittest.TestCase):\r\n def test_conjgrad(self):\r\n A= np.array([[4,1],[1,3]])\r\n b= np.array([1,2])\r\n x= np.array([2,1])\r\n expected= np.array([0.0909,0.6363])\r\n result= conjgrad(A,b,x)\r\n for i in range(len(A)):\r\n self.assertAlmostEqual(result[i],expected[i],3)\r\n def test_conjgrad_precond(self):\r\n A= np.array([[4,1],[1,3]])\r\n b= np.array([1,2])\r\n x= np.array([2,1])\r\n expected= np.array([0.0909,0.6363])\r\n result= PreconditionedConjgrad(A,b,x)\r\n print()\r\n for i in range(len(A)):\r\n self.assertAlmostEqual(result[i],expected[i],3) \r\n'''\r\ndef measure_execution_time():\r\n A= np.array([[4,1],[1,3]])\r\n b= np.array([1,2])\r\n x= np.array([2,1])\r\n init_time1= time.time()\r\n PreconditionedConjgrad(A,b,x)\r\n final_time1= time.time() \r\n init_time2= time.time()\r\n conjgrad(A,b,x)\r\n final_time2= time.time() \r\n print('time needed for non precondionned method to find X',final_time1-init_time1) \r\n print('time needed for precondionned method to find X',final_time2-init_time2) \r\nmeasure_execution_time()'''\r\n#s1=TestConjgrad_1(100)\r\n##We generate a curve representing the variations of the relative error according to the number of iterations\r\n##relative error = the difference in magnitudes between the expected solution and the solution found by the algorithm \r\n\r\n#------------->inutile de tracer les tests suffisent (mais a vous de voir)\r\n\r\n#plt.plot(s1[1],s1[0])\r\n#plt.xlabel('Number of iterations')\r\n#plt.ylabel('Relative error')\r\n#plt.title('Variations of the relative error in the resolution of Ax=b') \r\n#plt.show() \r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main(Test_gradient(), verbosity = 2)\r\n","sub_path":"Partie_tests/gradient_final1.py","file_name":"gradient_final1.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"615691144","text":"\" Specialized entry to estimate Gibbs Free Energy for a solid\"\nimport hashlib\nfrom itertools import combinations\nfrom typing import List, Optional\n\nimport numpy as np\nfrom monty.json import MontyDecoder\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.entries.computed_entries import ComputedEntry, ConstantEnergyAdjustment\nfrom scipy.interpolate import 
interp1d\n\nfrom rxn_network.data import G_ELEMS\n\n\nclass GibbsComputedEntry(ComputedEntry):\n \"\"\"\n An extension to ComputedEntry which estimates the Gibbs free energy of formation\n of solids using energy adjustments from the machine-learned SISSO descriptor from\n Bartel et al. (2018).\n\n WARNING: This descriptor only applies to solids. See\n entries.nist.NISTReferenceEntry for common gases (e.g. CO2).\n \"\"\"\n\n def __init__(\n self,\n composition: Composition,\n formation_energy_per_atom: float,\n volume_per_atom: float,\n temperature: float,\n energy_adjustments: Optional[List] = None,\n parameters: Optional[dict] = None,\n data: Optional[dict] = None,\n entry_id: Optional[object] = None,\n ):\n \"\"\"\n\n A new computed entry object is returned with a supplied energy correction\n representing the difference between the formation enthalpy at T=0K and the\n Gibbs formation energy at the specified temperature.\n\n Args:\n composition: The composition object (pymatgen)\n formation_energy_per_atom: Calculated formation enthalpy, dH, at T = 298 K,\n normalized to the total number of atoms in the composition.\n volume_per_atom: The total volume of the associated structure divided by\n the total number of atoms.\n temperature: Temperature [K] by which to acquire dGf(T), must be selected\n from a range of [300, 2000] K. If temperature is not selected from\n one of [300, 400, 500, ... 2000 K], then free energies will be\n interpolated.\n energy_adjustments: Optional list of energy adjustments\n parameters: Optional list of calculation parameters\n data: Optional dictionary containing entry data\n entry_id: Optional entry-id, such as the entry's mp-id\n \"\"\"\n self._composition = Composition(composition)\n self.formation_energy_per_atom = formation_energy_per_atom\n self.volume_per_atom = volume_per_atom\n self.temperature = temperature\n\n num_atoms = self._composition.num_atoms\n\n if temperature < 300 or temperature > 2000:\n raise ValueError(\"Temperature must be selected from range: [300, 2000] K.\")\n\n if energy_adjustments is not None:\n energy_adjustments = [\n adjustment\n for adjustment in energy_adjustments\n if adjustment.name != \"Gibbs SISSO Correction\"\n ]\n else:\n energy_adjustments = []\n\n energy_adjustments.append(\n ConstantEnergyAdjustment(\n self.gibbs_adjustment(temperature),\n uncertainty=0.05 * num_atoms, # descriptor has ~50 meV/atom MAD\n name=\"Gibbs SISSO Correction\",\n description=f\"Gibbs correction: dGf({self.temperature} K) - dHf (298 K)\",\n )\n )\n\n formation_energy = num_atoms * formation_energy_per_atom\n\n super().__init__(\n composition=composition,\n energy=formation_energy,\n energy_adjustments=energy_adjustments,\n parameters=parameters,\n data=data,\n entry_id=entry_id,\n )\n\n def get_new_temperature(self, new_temperature: float) -> \"GibbsComputedEntry\":\n \"\"\"\n Return a copy of the GibbsComputedEntry at the new specified temperature.\n\n Args:\n new_temperature: The new temperature to use [K]\n\n Returns:\n A copy of the GibbsComputedEntry at the new specified temperature.\n \"\"\"\n new_entry_dict = self.as_dict()\n new_entry_dict[\"temperature\"] = new_temperature\n\n new_entry = self.from_dict(new_entry_dict)\n return new_entry\n\n def gibbs_adjustment(self, temperature: float) -> float:\n \"\"\"\n Returns the difference between the predicted Gibbs formation energy and the\n formation enthalpy at 298 K, i.e., dGf(T) - dHf(298 K). Calculated using\n SISSO descriptor from Bartel et al. 
(2018) and elemental chemical potentials\n (FactSage).\n\n Units: eV (not normalized)\n\n Reference: Bartel, C. J., Millican, S. L., Deml, A. M., Rumptz, J. R.,\n Tumas, W., Weimer, A. W., … Holder, A. M. (2018). Physical descriptor for\n the Gibbs energy of inorganic crystalline solids and\n temperature-dependent materials chemistry. Nature Communications, 9(1),\n 4168. https://doi.org/10.1038/s41467-018-06682-4\n\n Args:\n temperature: The absolute temperature [K].\n Returns:\n The correction to Gibbs free energy of formation (eV) from DFT energy.\n \"\"\"\n if self._composition.is_element:\n return 0\n\n num_atoms = self._composition.num_atoms\n reduced_mass = self._reduced_mass(self._composition)\n\n return num_atoms * self._g_delta_sisso(\n self.volume_per_atom, reduced_mass, temperature\n ) - self._sum_g_i(self._composition, temperature)\n\n @staticmethod\n def _g_delta_sisso(\n volume_per_atom: float, reduced_mass: float, temp: float\n ) -> float:\n \"\"\"\n G^delta as predicted by SISSO-learned descriptor from Eq. (4) in\n Bartel et al. (2018).\n\n Args:\n vol_per_atom: volume per atom [Å^3/atom]\n reduced_mass: reduced mass as calculated with pair-wise sum formula [amu]\n temp: Temperature [K]\n\n Returns:\n float: G^delta\n \"\"\"\n\n return (\n (\n -2.48e-4 * np.log(volume_per_atom)\n - 8.94e-5 * reduced_mass / volume_per_atom\n )\n * temp\n + 0.181 * np.log(temp)\n - 0.882\n )\n\n @staticmethod\n def _sum_g_i(composition, temperature) -> float:\n \"\"\"\n Sum of the stoichiometrically weighted chemical potentials [eV] of the elements\n at specified temperature, as acquired from \"elements.json\".\n \"\"\"\n elems = composition.get_el_amt_dict()\n\n if temperature % 100 > 0:\n sum_g_i = 0\n for elem, amt in elems.items():\n g_interp = interp1d(\n [float(t) for t in G_ELEMS.keys()],\n [g_dict[elem] for g_dict in G_ELEMS.values()],\n )\n sum_g_i += amt * g_interp(temperature)\n else:\n sum_g_i = sum(\n [amt * G_ELEMS[str(temperature)][elem] for elem, amt in elems.items()]\n )\n\n return sum_g_i\n\n @staticmethod\n def _reduced_mass(composition: Composition) -> float:\n \"\"\"\n Reduced mass [amu] as calculated via Eq. 6 in Bartel et al. 
(2018),\n to be used in SISSO descriptor equation.\n \"\"\"\n reduced_comp = composition.reduced_composition\n num_elems = len(reduced_comp.elements)\n elem_dict = reduced_comp.get_el_amt_dict()\n\n denominator = (num_elems - 1) * reduced_comp.num_atoms\n\n all_pairs = combinations(elem_dict.items(), 2)\n mass_sum = 0\n\n for pair in all_pairs:\n m_i = Composition(pair[0][0]).weight\n m_j = Composition(pair[1][0]).weight\n alpha_i = pair[0][1]\n alpha_j = pair[1][1]\n\n mass_sum += (alpha_i + alpha_j) * (m_i * m_j) / (m_i + m_j)\n\n reduced_mass = (1 / denominator) * mass_sum\n\n return reduced_mass\n\n @classmethod\n def from_structure(\n cls,\n structure: Structure,\n formation_energy_per_atom: float,\n temperature: float,\n **kwargs,\n ) -> \"GibbsComputedEntry\":\n \"\"\"\n Constructor method for building a GibbsComputedEntry from a structure,\n formation enthalpy, and temperature.\n\n Args:\n structure: Structure object (pymatgen)\n formation_energy_per_atom: Formation enthalpy at T = 298 K associated\n with structure\n temperature: Desired temperature [K] for acquiring dGf(T)\n **kwargs: Optional kwargs to be passed to init method of GibbsComputedEntry\n\n Returns:\n A new GibbsComputedEntry object\n \"\"\"\n composition = structure.composition\n volume_per_atom = structure.volume / structure.num_sites\n entry = cls(\n composition=composition,\n formation_energy_per_atom=formation_energy_per_atom,\n volume_per_atom=volume_per_atom,\n temperature=temperature,\n **kwargs,\n )\n return entry\n\n @property\n def is_experimental(self):\n return bool(self.data.get(\"icsd_ids\"))\n\n def as_dict(self) -> dict:\n \"Returns an MSONable dict.\"\n data = super().as_dict()\n data[\"volume_per_atom\"] = self.volume_per_atom\n data[\"formation_energy_per_atom\"] = self.formation_energy_per_atom\n data[\"temperature\"] = self.temperature\n return data\n\n @classmethod\n def from_dict(cls, d) -> \"GibbsComputedEntry\":\n \"Returns a GibbsComputedEntry object from MSONable dictionary\"\n dec = MontyDecoder()\n entry = cls(\n composition=d[\"composition\"],\n formation_energy_per_atom=d[\"formation_energy_per_atom\"],\n volume_per_atom=d[\"volume_per_atom\"],\n temperature=d[\"temperature\"],\n energy_adjustments=dec.process_decoded(d[\"energy_adjustments\"]),\n parameters=d[\"parameters\"],\n data=d[\"data\"],\n entry_id=d[\"entry_id\"],\n )\n return entry\n\n def __repr__(self):\n output = [\n f\"GibbsComputedEntry | {self.entry_id} | {self.composition.formula} \"\n f\"({self.composition.reduced_formula})\",\n f\"Gibbs Energy ({self.temperature} K) = {self.energy:.4f}\",\n ]\n return \"\\n\".join(output)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return (\n (self.entry_id == other.entry_id)\n and (self.temperature == other.temperature)\n and (self.composition == other.composition)\n and (self.energy == other.energy)\n )\n return False\n\n def __hash__(self):\n data_md5 = hashlib.md5(\n \"GibbsComputedEntry\"\n f\"{self.composition}_\"\n f\"{self.energy}_{self.entry_id}_\"\n f\"{self.temperature}\".encode(\"utf-8\")\n ).hexdigest()\n return int(data_md5, 16)\n","sub_path":"y2mn2o7_selectivity/reaction-network/src/rxn_network/entries/gibbs.py","file_name":"gibbs.py","file_ext":"py","file_size_in_byte":10944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"422477578","text":"\"\"\"A threading.Thread subclass that can be cancelled.\n\n\"\"\"\n\nfrom threading import Thread\nfrom typing import Generator\n\n\nclass 
CancellableThread(Thread):\n \"\"\"A threading.Thread that can be cancelled\n\n A CancellableThread can be stopped asynchronously by the main thread\n if the supplied generator cooperates. The function executed in the\n `run` method must call `yield` periodically. The thread will be more\n responsive to cancelling if generator yields often. A thread should\n not be re-used after it has been cancelled.\n \"\"\"\n\n def __init__(self, target: Generator[None, None, None], name: str = None):\n \"\"\"\n :param target: generator\n :param name: str\n \"\"\"\n if not isinstance(target, Generator):\n raise ValueError(f\"Target must be a generator, not: {type(target)}\")\n\n super().__init__(\n target=target, # type:ignore\n name=name,\n daemon=True,\n )\n self._is_cancelled = False\n\n def run(self) -> None:\n \"\"\"Executes the `target` generator until the thread is cancelled.\n\n The `target` generator is expected to perform a long running operation\n that periodically calls \"yield\" to allow the thread to check if\n it has been cancelled.\n\n ```python\n def long_running(interval:float = 1.0) -> None:\n while True:\n # some operation here\n yield\n time.sleep(interval)\n ```\n\n \"\"\"\n for _ in self._target: # type: ignore\n if self._is_cancelled:\n return\n\n def cancel(self, join: bool = True, timeout: float = 0.05) -> None:\n \"\"\"Signals that the thread should terminate as soon as possible.\n\n Call this method from the main thread on the running thread.\n\n :param join: bool\n :param timeout: float\n \"\"\"\n self._is_cancelled = True\n if join:\n while self.is_alive():\n self.join(timeout)\n","sub_path":"busylight/lights/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"549152187","text":"# -*- coding: utf-8 -*-\n\nfrom image_match.goldberg import ImageSignature\nfrom elasticsearch import Elasticsearch\nfrom image_match.elasticsearch_driver import SignatureES\nimport statistics\nimport sys, os\nimport json\n\n\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\nimport crossparser_tools\n\ntemp_folder = crossparser_tools.temp_folder\nconfig_folder = crossparser_tools.config_folder\ndata_folder = crossparser_tools.data_folder\nwebsite_root = crossparser_tools.website_root\nproj_root_dir = crossparser_tools.proj_root_dir\n\nimg_folder = crossparser_tools.img_folder\nimg_module_folder = crossparser_tools.img_module_folder\n\n\nimg_db_products = {}\n\ndist_cutoff = 0.7\n\n\ndef one_img_search(img):\n res = ses.search_image(img)\n\n match_prods = {}\n\n for img in res:\n dist = img['dist']\n prod_id = img['metadata']['prod_id']\n if prod_id not in match_prods:\n match_prods[prod_id] = dist\n\n return match_prods\n\n\ndef mul_img_search(imgs):\n\n match_prods = {}\n\n img_ind = -1\n\n for img in imgs:\n img_ind += 1\n match_prods_new = one_img_search(img)\n\n\n #Increase not found items\n for prod_id, dist in match_prods.items():\n if prod_id not in match_prods_new:\n match_prods[prod_id] = str(match_prods[prod_id]) + '||' + str(dist_cutoff)\n\n #Copy new found and increase found\n for prod_id, dist in match_prods_new.items():\n if prod_id not in match_prods:\n for i in range(img_ind):\n match_prods[prod_id] = str(dist_cutoff) + '||'\n if img_ind > 0 :\n match_prods[prod_id] += str(dist)\n else:\n match_prods[prod_id] = str(dist)\n\n else:\n match_prods[prod_id] = str(match_prods[prod_id]) + '||' + str(dist)\n\n\n for prod_id, dists in 
match_prods.items():\n dists = str(dists).split('||')\n dist_sum = 0.0\n for dis in dists:\n dist_sum += float(dis)\n\n dist_sum = dist_sum / len(dists)\n\n match_prods[prod_id] = dist_sum\n\n\n\n return match_prods\n\ndef dic_to_list(dic):\n\n dic_list = []\n\n for prod_id, dist in dic.items():\n dic_list.append({'prod_id' : prod_id, 'dist' : dist})\n\n dic_list = sorted(dic_list, key = lambda k:k['dist'], reverse=False)\n return dic_list\n\n\ndef parse_img_db():\n with open(data_folder + 'img_db_prods', 'r') as cr_file:\n for line in cr_file:\n if not line.strip().startswith('#'):\n if line.strip():\n k, v = line.strip().split('$$')\n img_db_products[k.strip()] = v.strip()\n\n\ndef search_products_for(prod_id):\n imgs = []\n\n for img, id in img_db_products.items():\n if prod_id == id:\n imgs.append(img_folder + img)\n\n if len(imgs) == 0:\n print('No image found')\n return\n\n is_one_img_search = False\n\n\n if is_one_img_search:\n\n return dic_to_list(one_img_search(imgs[0]))\n\n else:\n\n return dic_to_list(mul_img_search(imgs))\n\n\n\nif __name__ == '__main__':\n\n parse_img_db()\n\n if len(sys.argv) == 2:\n prod_id = sys.argv[1]\n else:\n if len(img_db_products) > 0:\n prod_id = img_db_products[next(iter(img_db_products))]\n else:\n prod_id = '219720bed2MP002XW1GZVD'\n\n #print('prod_id', prod_id)\n\n es = Elasticsearch()\n ses = SignatureES(es, distance_cutoff=5.0)\n\n\n print(json.dumps(search_products_for(prod_id)))\n\n quit()\n\n","sub_path":"CrossParser/source/ImageMatch/test_match.py","file_name":"test_match.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"490078213","text":"'''\nA Recurrent Neural Network (LSTM) implementation example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\nLong Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf\n\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.ops import rnn, rnn_cell\nimport ast\nimport csv\nimport numpy as np\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\ndef normalize(train, test):\n mean, std = train.mean(), test.std()\n train = (train - mean) / std\n test = (test - mean) / std\n return train, test\n\ndef loadTrainingData():\n attribute_list_new = []\n label_list = []\n reader = (open(\"../finalDataSetNeuralNet2/train_X_10_11_12_13_14_15_16_dup_ra_ordered.txt\", \"rt\"))\n statsList = reader.readlines()\n # print(stats)\n\n for stat in statsList:\n str2 = ast.literal_eval(stat)\n # print(str2[0])\n attribute_list55 = []\n for i in range(0, len(str2[0])):\n str2[0][i] = str(str2[0][i])\n if (str2[0][i][-1] == \"%\"):\n str2[0][i] = str2[0][i][:-1]\n\n attribute_list55.append(str2[0][i])\n # print(str2[0])\n\n for i in range(0, len(str2[1])):\n str2[1][i] = str(str2[1][i])\n if (str2[1][i][-1] == \"%\"):\n str2[1][i] = str2[1][i][:-1]\n attribute_list55.append(str2[1][i])\n\n # print(attribute_list55)\n # print(str2[1])\n attribute_list_new.append(attribute_list55)\n\n training_attributes = np.array(attribute_list_new).astype(np.float32)\n\n reader=csv.reader(open(\"../finalDataSetNeuralNet2/train_Y_10_11_12_13_14_15_16_dup_ra_ordered.txt\",\"rt\"))\n for row in reader:\n # attributes in 
column 1\n        label_list.append(row[0])\n\n    # training_attributes=np.array(attribute_list).astype(np.float32)\n\n    training_class_labels=np.array(label_list).astype(np.int32)\n\n    return training_attributes, training_class_labels\n\ndef testingData():\n    label_list = []\n    attribute_list_new = []\n    reader = (open(\"../finalDataSetNeuralNet2/test_X_10_11_12_13_14_15_16_dup_ra_ordered.txt\", \"rt\"))\n    statsList = reader.readlines()\n    # print(stats)\n    for stat in statsList:\n        str2 = ast.literal_eval(stat)\n        # print(str2[0])\n        attribute_list55 = []\n        for i in range(0, len(str2[0])):\n            str2[0][i] = str(str2[0][i])\n            if (str2[0][i][-1] == \"%\"):\n                str2[0][i] = str2[0][i][:-1]\n\n            attribute_list55.append(str2[0][i])\n        # print(str2[0])\n\n        for i in range(0, len(str2[1])):\n            str2[1][i] = str(str2[1][i])\n            if (str2[1][i][-1] == \"%\"):\n                str2[1][i] = str2[1][i][:-1]\n            attribute_list55.append(str2[1][i])\n\n        # print(attribute_list55)\n        # print(str2[1])\n        attribute_list_new.append(attribute_list55)\n\n    testing_attributes = np.array(attribute_list_new).astype(np.float32)\n\n    counter = 0\n\n    reader=csv.reader(open(\"../finalDataSetNeuralNet2/test_Y_10_11_12_13_14_15_16_dup_ra_ordered.txt\",\"rt\"))\n    for row in reader:\n        counter = counter+1\n        # print(counter)\n        # attributes in column 1\n        label_list.append(row[0])\n\n    # testing_attributes=np.array(attribute_list).astype(np.float32)\n    testing_labels=np.array(label_list).astype(np.int32)\n    return testing_attributes, testing_labels\n\nx_train_vals, y_train_vals = loadTrainingData()\nx_test_vals, y_test_vals = testingData()\n\nprint(x_train_vals)\nprint(y_train_vals)\n\nx_train_vals, x_test_vals = normalize(x_train_vals, x_test_vals)\n\nimport numpy as np\n\ndef next_batch(num, dataX, dataY):\n    \"\"\"\n    Return a total of `num` samples from the arrays `dataX` and `dataY`.\n    \"\"\"\n    idx = np.arange(0, len(dataX)) # get all possible indexes\n    np.random.shuffle(idx) # shuffle indexes\n    idx = idx[0:num] # use only `num` random indexes\n    data_shuffleX = [dataX[i] for i in idx] # get list of `num` random samples\n    data_shuffleX = np.asarray(data_shuffleX) # get back numpy array\n    data_shuffleY = [dataY[i] for i in idx] # get list of `num` random samples\n    data_shuffleY = np.asarray(data_shuffleY) # get back numpy array\n\n    return data_shuffleX, data_shuffleY\n\n# # demo data, 1d and 2d array\n# Xtr, Ytr = np.arange(0, 10), np.arange(0, 100).reshape(10, 10)\n# print(Xtr)\n# print(Ytr)\n\nprint(\"\\\\n50 random samples from the training data:\")\nprint(next_batch(50, x_train_vals, y_train_vals))\n\nbatch_x, batch_y = mnist.train.next_batch(50)\n\nprint(batch_x)\nprint(batch_y)\n\n\n\n\n'''\nTo classify images using a recurrent neural network, we consider every image\nrow as a sequence of pixels. 
Because MNIST image shape is 28*28px, we will then\nhandle 28 sequences of 28 steps for every sample.\n'''\n\n# Parameters\nlearning_rate = 0.001\ntraining_iters = 100000\nbatch_size = 32\ndisplay_step = 10\n\n# Network Parameters\nn_input = 23 # MNIST data input (img shape: 28*28)\nn_steps = 2 # timesteps\nn_hidden = 128 # hidden layer num of features\nn_classes = 1 # MNIST total classes (0-9 digits)\n\nn_input_tennis = 46 # MNIST data input (img shape: 28*28)\nn_steps_tennis = 32 # timesteps\nn_hidden_tennis = 128 # hidden layer num of features\nn_classes_tennis = 2 # MNIST total classes (0-9 digits)\n\n# tf Graph input\nx = tf.placeholder(\"float\", [None, n_steps, n_input])\ny = tf.placeholder(\"float\", [None, n_classes])\n\n# Define weights\nweights = {\n 'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))\n}\nbiases = {\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\n\ndef RNN(x, weights, biases):\n\n # Prepare data shape to match `rnn` function requirements\n # Current data input shape: (batch_size, n_steps, n_input)\n # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)\n\n # Permuting batch_size and n_steps\n x = tf.transpose(x, [1, 0, 2])\n # Reshaping to (n_steps*batch_size, n_input)\n x = tf.reshape(x, [-1, n_input])\n # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n x = tf.split(0, n_steps, x)\n\n # Define a lstm cell with tensorflow\n lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n\n # Get lstm cell output\n outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)\n\n # Linear activation, using rnn inner loop last output\n return tf.matmul(outputs[-1], weights['out']) + biases['out']\n\npred = RNN(x, weights, biases)\n\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initializing the variables\ninit = tf.initialize_all_variables()\n\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n step = 1\n # Keep training until reach max iterations\n while step * batch_size < training_iters:\n batch_x, batch_y = next_batch(batch_size, x_train_vals, y_train_vals)\n # Reshape data to get 28 seq of 28 elements\n batch_x = batch_x.reshape((batch_size, n_steps, n_input))\n batch_y = batch_y.reshape(batch_size, n_classes)\n # Run optimization op (backprop)\n sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})\n if step % display_step == 0:\n # Calculate batch accuracy\n acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})\n # Calculate batch loss\n loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})\n print(\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(acc))\n step += 1\n print(\"Optimization Finished!\")\n\n # Calculate accuracy for 128 mnist test images\n test_len = 32\n test_data = x_test_vals[:test_len].reshape((-1, n_steps, n_input))\n test_label = y_test_vals[:test_len].reshape(batch_size, n_classes)\n print(\"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={x: test_data, y: test_label}))","sub_path":"src/machine_learning/lstmmdist.py","file_name":"lstmmdist.py","file_ext":"py","file_size_in_byte":8342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"543504383","text":"# -*- coding unix -*-\n\nfrom cerberus import Validator\nimport lmrt4u.helpers as helpers\n\nschema = {\n 'sprints': {\n 'type': 'dict',\n 'valueschema': {\n 'type': 'dict',\n 'schema': {\n 'active': { 'type': 'boolean' },\n 'points': { 'type': 'integer' },\n 'start': { 'type': 'datetime', 'coerce': helpers.to_date, 'is_before': 'end' },\n 'end': { 'type': 'datetime', 'coerce': helpers.to_date },\n 'stories': { 'type': 'list', 'schema': {'type': 'list'} }\n }\n }\n }\n}\n\nclass CustomValidator(Validator):\n \"\"\"Allows for isBefore datetime validation\"\"\"\n def _validate_is_before(self, other, field, value):\n \"\"\" \n Validate field is before other field.\n The rule's arguments are validated against this schema:\n {'type': 'string'}\n \"\"\"\n if other not in self.document:\n return False\n if value > self.document[other]:\n self._error(field, \n \"%s is an early date.\" % other)\n\ndef validate(rawData):\n \"\"\"Validates file contents\"\"\"\n v = CustomValidator()\n return v.validate(rawData, schema)\n","sub_path":"lmrt4u/Validator.py","file_name":"Validator.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"265464024","text":"import numpy as np \nfrom sklearn.cluster import KMeans \nfrom datetime import datetime, date, timedelta\nfrom sklearn.preprocessing import normalize\nimport random,os\nimport matplotlib\nimport matplotlib.pyplot as plt \n\n'''\n采用kmeans聚类,按照评价指标对项目聚类\n''' \n\n\ndef k_means(data, k):\n # 聚类\n # 首先得到每个项目每个月的特征向量,例如 P_i = (x1,x2,...,x8)代表项目P在从创建开始(在GitHub上的created_at)第i个月的各个特征的值\n X = data[:]\n X = normalize(X = X, axis=0)\n X = np.array(X)\n kmeans = KMeans(n_clusters=k, init='random', random_state=0, max_iter=500).fit(X)\n return list(kmeans.labels_), list(kmeans.cluster_centers_)\n \n# 画出每类类中心的各个评价指标对比的条形图\ndef Draw_graph(data, x_labels, centers, cluster_num, n = 0):\n # labels = ['forks','committer','commits','commit_comment',\n # 'req_opened','req_closed','req_merged','other','issue','issue_comment','watchers']\n labels = ['forks','committer','commits','commit_comment',\n 'req_opened','req_closed','req_merged','other','issue','issue_comment','watchers',\n 'forks_std','committer_std','commits_std','commit_comment_std',\n 'req_opened_std','req_closed_std','req_merged_std','other_std','issue_std','issue_comment_std','watchers_std'] \n x = np.arange(len(labels)) # the label locations\n width = 0.15 if cluster_num<5 else 0.15*5/cluster_num # the width of the bars\n\n fig, ax = plt.subplots()\n rects = []\n for i in range(cluster_num):\n pos = x-cluster_num/2*width + (2*i+1)*width/2\n rect = ax.bar(pos, centers[i], width, label = str(i))\n rects.append(rect)\n\n # Add some text for labels, title and custom x-axis tick labels, etc.\n ax.set_ylabel('feature_value')\n ax.set_title(str(cluster_num)+' cluters')\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend()\n\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n # for rect in rects:\n # autolabel(rect)\n\n fig.tight_layout()\n\n plt.show() \n\n## 从各类项目(project_label)中随机选择n个项目出来进行观察\ndef choose_project(data, project_label, projects_valid, cluster_num, n):\n projects = [ [] for i 
in range(cluster_num)]\n selects = [] # 记录的是选出来的项目在projects_valid中的下标\n for i in range(len(project_label)):\n cur_cluster = project_label[i]\n projects[cur_cluster].append(i)\n\n for i in range(cluster_num):\n print(\"cluster \" + str(i) + \" count is \" + str(len(projects[i])) ) #输出每类的项目个数\n if len(projects[i])>n:\n tmp = random.sample(projects[i], n)\n selects.append(tmp)\n else:\n selects.append(projects[i])\n print(\"Error: cluster \" + str(i) + \" is not enough\")\n for i in range(cluster_num):\n print(\"********************* The cluster \" + str(i) + \" ********************* \")\n for j in selects[i]:\n print([round(v, 4) for v in data[j]]) # 选出来的每类的标准化后的数据\n return selects\n\n\n# if __name__ == '__main__':\n# data = []\n# cluster_num = 5\n# root_path = os.getcwd() + '\\\\data\\\\'\n\n# projects_valid = Month_all(root_path, data, 5)\n\n# if len(data)>1:\n# kmeans_labels, kmeans_centers = k_means(data, cluster_num)\n# Draw_graph(data, kmeans_labels, kmeans_centers, cluster_num, 0)","sub_path":"src/model/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"421291956","text":"#!/usr/bin/env python\n# encoding:utf-8\n\"\"\"\nauthor: liusili\n@l@icense: (C) Copyright 2019, Union Big Data Co. Ltd. All rights reserved.\n@contact: liusili@unionbigdata.com\n@software:\n@file: flask_infer\n@time: 2019/12/2\n@desc:\n\"\"\"\nimport os\nimport cv2\nimport numpy as np\nimport time, datetime\nfrom mmdet.apis import init_detector, inference_detector\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nbasedir = '/home/Visionox/V3/OLED_deploy/'\nmodel = None\nlabels = []\n# NMS_THD = 0.55\nNMS_THD = 0.3\n\ndef initModel():\n global model\n global labels\n config_file = basedir + 'config_OLED.py'\n checkpoint_file = basedir + 'v3_oled_deploy.pth'\n for line in open(basedir + 'classes.txt', \"r\"):\n lineTemp = line.strip()\n if lineTemp:\n labels.append(lineTemp)\n model = init_detector(config_file, checkpoint_file, device='cuda:0')\n\n\ndef NMS(bboxes, score, thresh):\n \"\"\"Pure Python NMS baseline.\"\"\"\n # bounding box and score\n boxes = np.array(bboxes)\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n scores = np.array(score)\n # the area of candidate\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # score in descending order\n order = scores.argsort()[::-1]\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n # Calculate the intersection between current box and other boxes\n # using numpy->broadcast, obtain vector\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n # intersection area, return zero if no intersection\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n # IOU:intersection area /(area1+area2-intersection area)\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n # find out box the overlap ratio smaller than threshold\n inds = np.where(ovr <= thresh)[0]\n # update order\n order = order[inds + 1]\n return keep\n\n\ndef selectClsScoreBoxFromResult(result, cls_names):\n assert isinstance(cls_names, (tuple, list))\n\n if isinstance(result, tuple):\n bbox_result, segm_result = result\n else:\n bbox_result, segm_result = result, None\n bboxes = np.vstack(bbox_result)\n\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)]\n 
labels = np.concatenate(labels)\n selectedCls = []\n selectedScore = []\n selectedBox = []\n assert (len(labels) == len(bboxes))\n for i in range(0, len(labels)):\n # selectedResult.append([cls_names[labels[i]], bboxes[i][-1]])\n selectedCls.append(cls_names[labels[i]])\n selectedScore.append(bboxes[i][-1])\n tempBox = []\n tempBox = bboxes[i][0], bboxes[i][1], bboxes[i][2], bboxes[i][3]\n selectedBox.append(tempBox)\n return selectedCls, selectedScore, selectedBox\n\n\ndef infer(sample_root, outpath):\n global model\n for code in os.listdir(sample_root):\n code_path = os.path.join(sample_root, code)\n for img_name in os.listdir(code_path):\n imagepath = os.path.join(code_path, img_name)\n img = open(imagepath, 'rb').read()\n if img == None:\n print('img is none')\n nparr = np.fromstring(img, np.uint8)\n img_np = cv2.imdecode(nparr, 1)\n # 边缘裁剪\n img_np = img_np[:, :1228, :]\n # opzealot\n height = img_np.shape[0]\n width = img_np.shape[1]\n\n sys_time = int(int(round(time.time() * 1000)))\n cur_dir = os.getcwd()\n localtime = time.localtime(time.time())\n result = {}\n result['defect'] = 0\n out = inference_detector(model, img_np)\n\n log_codes = []\n log_scores = []\n bboxs = []\n log_codes, log_scores, bboxs = selectClsScoreBoxFromResult(out, labels)\n if len(log_codes) != 0:\n result['defect'] = 1\n validResult = np.arange(0, len(bboxs))\n if len(bboxs) > 1:\n validResult = NMS(bboxs, log_scores, NMS_THD)\n\n for index in validResult:\n # ignore edges codes\n xmin = bboxs[index][0]\n ymin = bboxs[index][1]\n xmax = bboxs[index][2]\n ymax = bboxs[index][3]\n\n center_x = (xmin + xmax) // 2\n center_y = (ymin + ymax) // 2\n\n if center_x < 100 or center_y < 100 or center_x > width - 100 \\\n or center_y > height - 100:\n log_scores[index] = 0\n\n if log_scores[index] > 0:\n cv2.rectangle(img_np, (bboxs[index][0], bboxs[index][1]),\n (bboxs[index][2], bboxs[index][3]), (0, 255, 255), thickness=2)\n strText = str(code) + ': ' + str(log_scores[index])\n cv2.putText(img_np, strText, (bboxs[index][0], bboxs[index][1]),\n cv2.FONT_HERSHEY_COMPLEX, 2, (255, 0, 0), 2)\n\n target_img_dir = outpath\n os.makedirs(target_img_dir, exist_ok=True)\n target_img_file_path = os.path.join(target_img_dir, img_name)\n cv2.imwrite(target_img_file_path, img_np)\n print('save img {}'.format(img_name))\n\n result['log_codes'] = log_codes\n result['log_score'] = str(log_scores)\n\n out_label = None\n out_score = None\n out_bbox = None\n if len(log_scores) == 0:\n out_label = 'Others'\n out_score = str(0.0)\n out_bbox = None\n else:\n out_score = max(log_scores)\n out_label = log_codes[log_scores.index(out_score)]\n out_bbox = bboxs[log_scores.index(out_score)]\n if out_score < 0.4:\n out_label = 'Others'\n\n # opzealot set the background threshold\n if out_score < 0.2:\n out_label = 'OK'\n out_score = 0.99\n result['img_cls'] = out_label\n result['img_score'] = str(out_score)\n result['detect_begin_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n result['detect_cost_time'] = '{:.2f}'.format(int(int(round(time.time() * 1000))) - sys_time)\n result['savepath'] = imagepath.replace('input', 'result')\n\n else:\n target_img_dir = outpath\n os.makedirs(target_img_dir, exist_ok=True)\n target_img_file_path = os.path.join(target_img_dir, img_name)\n cv2.imwrite(target_img_file_path, img_np)\n print('save image to {}'.format(target_img_file_path))\n\n result['defect'] = 1\n result['img_cls'] = 'OK'\n result['img_score'] = str(0.99)\n result['detect_begin_time'] = datetime.datetime.now().strftime('%Y-%m-%d 
%H:%M:%S')\n result['detect_cost_time'] = '{:.2f}'.format(int(round(time.time() * 1000)) - sys_time)\n result['savepath'] = target_img_file_path\n\n\nif __name__ == '__main__':\n initModel()\n imagepath = '/home/Visionox/V3/OLED_deploy/OLED_test'\n outpath = '/home/Visionox/V3/OLED_deploy/output'\n infer(imagepath, outpath)","sub_path":"tools_2/inferring/flask_infer_modify.py","file_name":"flask_infer_modify.py","file_ext":"py","file_size_in_byte":7709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"35044343","text":"import sys\nsys.path.insert(0, '/app/football/')\nfrom gfootball.env import football_env\nfrom gfootball.env import config\n\nprint (football_env.__file__)\n\n#0 -> my team (left)\n#1 -> opposing team (right)\nclass ObservationDebugger:\n def __init__(self):\n\n #only concerned with the left player\n self.observations = {} #key,value -> step, observation\n self.step_ct = 0\n\n\n def process_observation(self, step):\n\n this_obs, this_action = self.observations[step]\n this_ball_owned_team = this_obs['ball_owned_team']\n this_ball_owned_player = this_obs['left_agent_controlled_player']\n print (this_ball_owned_team, this_ball_owned_player, step, this_obs['score'], this_action )\n for i in range(30):\n prev_obs, prev_action = self.observations[step-i-1]\n prev_ball_owned_team = prev_obs['ball_owned_team']\n prev_ball_owned_player = prev_obs['left_agent_controlled_player']\n print (prev_ball_owned_team, prev_ball_owned_player, step-i-1, prev_obs['score'], prev_action)\n exit()\n\n\n def add_observation(self, obs, action):\n self.observations[self.step_ct] = (obs, action)\n self.step_ct += 1\n\nclass Rectangle(object):\n def __init__(self, xrange, yrange, zrange):\n self.xrange = xrange # (xmin, xmax)\n self.yrange = yrange\n self.zrange = zrange\n\n def contains_point(self, p):\n if not all(hasattr(p, loc) for loc in 'xyz'):\n raise TypeError(\"Can only check if 3D points are in the rect\")\n return all([self.xrange[0] <= p.x <= self.xrange[1],\n self.yrange[0] <= p.y <= self.yrange[1],\n self.zrange[0] <= p.z <= self.zrange[1]])\n\nclass Point(object):\n def __init__(self, x, y ,z):\n self.x = x\n self.y = y\n self.z = z\n\n def __iter__(self):\n yield from (self.x, self.y, self.z)\n\n def __str__(self):\n return \"str {} {} {}\".format(self.x, self.y, self.z)\n\nckpt_path = 'corner_ckpt_all/00200'\nplayers = [\"ppo2_cnn:left_players=1,policy=impala_cnn,checkpoint={0}\".format(ckpt_path)]\ncfg = config.Config({\n 'action_set':'default',\n 'dump_full_episodes': False,\n 'real_time':False,\n 'players' : players,\n 'level':'academy_pass_and_shoot_with_keeper'\n})\n\nenv = football_env.FootballEnv(cfg)\n\nenv.reset()\n\nobsDebugger = ObservationDebugger()\n\nmy_score = 0\nopp_score = 0\nstep = 0\ntotal_diff = 0.0\ntotal_eps = 0\nOpponent_GOAL = Rectangle(xrange = (.7, 1.1), yrange = (-.12,.12), zrange = (0, 2.5))\n\nwhile True:\n obs, rew, done, info = env.step([])\n\n ball_pos = obs['ball']\n # ball_point = Point(ball_pos[0], ball_pos[1], ball_pos[2])\n # ball_on_targ = Opponent_GOAL.contains_point(ball_point)\n # if not rew == 0:\n # print (rew)\n # print (info)\n # exit()\n if rew == 1.0:\n my_score += 1\n if rew == -1.0:\n opp_score += 1\n\n if done:\n diff = my_score - opp_score\n\n total_diff += diff\n my_score = 0\n opp_score = 0\n env.reset()\n\n total_eps += 1\n if total_eps == 100:\n\n break\nprint (total_diff)\nprint (total_diff/total_eps)\nprint 
(ckpt_path)\n","sub_path":"755_project/play_agent.py","file_name":"play_agent.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"515059901","text":"import pytest\nimport pandas as pd\nimport wandb\n\n\nrun = wandb.init(project=\"conftest_demo\", job_type=\"data_tests\")\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--reference_artifact\", action=\"store\")\n parser.addoption(\"--sample_artifact\", action=\"store\")\n parser.addoption(\"--ks_alpha\", action=\"store\")\n\n\n@pytest.fixture(scope=\"session\")\ndef data(request):\n\n reference_artifact = request.config.option.reference_artifact\n\n if reference_artifact is None:\n pytest.fail(\"--reference_artifact missing on command line\")\n\n sample_artifact = request.config.option.sample_artifact\n\n if sample_artifact is None:\n pytest.fail(\"--sample_artifact missing on command line\")\n\n local_path = run.use_artifact(reference_artifact).file()\n sample1 = pd.read_csv(local_path)\n\n local_path = run.use_artifact(sample_artifact).file()\n sample2 = pd.read_csv(local_path)\n\n return sample1, sample2\n\n\n@pytest.fixture(scope='session')\ndef ks_alpha(request):\n ks_alpha = request.config.option.ks_alpha\n\n if ks_alpha is None:\n pytest.fail(\"--ks_threshold missing on command line\")\n\n return float(ks_alpha)\n","sub_path":"09_conftest_demo/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"162991014","text":"import torch\n\nimport os\nfrom os import path\nimport time\nimport random\nimport argparse\n\nfrom data import *\nfrom seq2seq_attn import *\n\n# preprocess\nstart_tok = ''\nend_tok = ''\nunk_tok = ''\n\n# learning params\ninit_range = 0.08\nepochs = 20\neval_step = 1000\nbatch_size = 64\ncriterion = nn.NLLLoss()\n\n# seq2seq params\nembedding_dim = 128\nenc_hidden_dim = 128\ndec_hidden_dim = 128\n\nenc_layers = 1\ndec_layers = 1\n\n# output\nmodel_path = \"model.dat\"\n\ndef evaluate(model, set):\n\trandom.shuffle(set)\n\tpreds = []\n\tfor i in range(len(set)):\n\t\t#i=0\n\t\tlemma = set[i][0].tolist()\n\t\tword = set[i][1].tolist()\n\t\tfeats = set[i][2].tolist()\n\t\tpred = model(set[i], type='evaluate')\n\t\tpreds.append([word[1:], pred])\n\treturn preds\n\ndef score(preds):\n\tcorrect = 0\n\tfor pair in preds:\n\t\t# print('pair[0]', pair[0], 'pair[1]', pair[1])\n\t\tif pair[0] == pair[1]:\n\t\t\tcorrect += 1\n\treturn correct / len(preds)\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-lang', dest='lang', help='choose the language to run the task on', default='german', type=str)\n\targs = parser.parse_args()\n\n\tlanguage = args.lang\n\n\tout_stats_path = 'results/' + language + '_seq2seq.tsv'\n\n\t# data paths\n\ttrain_path = 'data/conll2017/all/task1/' + language + '-train-high'\n\tdev_path = 'data/conll2017/all/task1/' + language + '-dev'\n\ttest_path = 'data/conll2017/answers/task1/' + language + '-uncovered-test'\n\n\tdata = Data(train_path, dev_path, test_path)\n\tchar_vocab_len = data.create_char_vocab()\n\tfeat_vocab_len = data.create_feat_vocab()\n\ttrain, dev, test = data.vectorize()\n\t# dimension of train is 10000 x 3 or 10000 x [lemma, word, feats]\n\n\n\tspecial_toks = {'sos': data.get_char_id(start_tok), 'eos': data.get_char_id(end_tok)}\n\tmodel = Seq2Seq(char_vocab_len, feat_vocab_len, embedding_dim, enc_hidden_dim, 
dec_hidden_dim, special_toks)\n\n\tif os.path.exists(model_path):\n\t\tsaved_state = torch.load(model_path)\n\t\tmodel.load_state_dict(saved_state)\n\telse:\n\t\tmodel.init_weights(init_range)\n\t\toptimizer = optim.Adam(model.parameters())\n\t\ttrain_loss = 0\n\t\tstep = 0\n\n\t\ttrain_holdout = train[:1000]\n\n\t\twith open(out_stats_path, 'w+') as f:\n\t\t\tf.write('epoch\\ttrain_loss\\ttrain_acc\\tdev_acc\\n')\n\n\t\tfor epoch in range(epochs):\n\t\t\trandom.shuffle(train)\n\t\t\tfor i in range(len(train)):\n\t\t\t\tlemma = train[i][0]\n\t\t\t\tword = train[i][1]\n\t\t\t\tfeats = train[i][2]\n\t\t\t\tpred = model(train[i], type='train')\n\n\t\t\t\t# print('word: {:30} pred: {:30}'.format(data.vec2word(word), data.vec2word([x.max(0)[1].item() for x in pred])))\n\n\t\t\t\ttotal_loss = None\n\t\t\t\t# print('pred', pred.size(), 'word', word.size())\n\t\t\t\tloss = criterion(pred, word[1:])\n\t\t\t\tif total_loss is None:\n\t\t\t\t\ttotal_loss = loss\n\t\t\t\telse:\n\t\t\t\t\ttotal_loss += loss\n\n\t\t\t\toptimizer.zero_grad()\n\t\t\t\ttotal_loss.backward()# print('w_embeds_i =', w_embeds_i.size())\n\t\t\t\t# print('h0 =', h0.size())\n\t\t\t\t# print('self.c0 =', self.c0.size())\n\t\t\t\toptimizer.step()\n\t\t\t\ttrain_loss += total_loss\n\n\t\t\t\tstep += 1\n\t\t\t\tif step % eval_step == 0:\n\t\t\t\t\ttrain_preds = evaluate(model, train_holdout)\n\t\t\t\t\ttrain_acc = score(train_preds)\n\t\t\t\t\tdev_preds = evaluate(model, dev)\n\t\t\t\t\tdev_acc = score(dev_preds)\n\t\t\t\t\tprint('train examples')\n\t\t\t\t\tfor j in range(5):\n\t\t\t\t\t\tprint('word: {:30} pred: {:30}'.format(data.vec2word(train_preds[j][0]), data.vec2word(train_preds[j][1])))\n\t\t\t\t\tprint('dev examples')\n\t\t\t\t\tfor j in range(5):\n\t\t\t\t\t\tprint('word: {:30} pred: {:30}'.format(data.vec2word(dev_preds[j][0]), data.vec2word(dev_preds[j][1])))\n\t\t\t\t\tprint('epoch: {:.2f}/{:d} completion: {:.2f}% train loss: {:.4f} train acc: {:f} dev acc: {:f}'.format(\n\t\t\t\t\t\tfloat(epoch) + ((i+1)/len(train)), epochs, (step/(epochs*len(train)))*100, train_loss / eval_step, train_acc*100, dev_acc*100))\n\n\t\t\t\t\ttrain_loss_str = str(round((train_loss / eval_step).item(), 3))\n\t\t\t\t\ttrain_acc_str = str(round(train_acc*100, 2))\n\t\t\t\t\tdev_acc_str = str(round(dev_acc*100, 2))\n\t\t\t\t\tepoch_str = str(float(epoch) + ((i+1)/len(train)))\n\t\t\t\t\twith open(out_stats_path, 'a') as f:\n\t\t\t\t\t\tf.write(epoch_str + '\\t' + train_loss_str + '\\t' + train_acc_str + '\\t' + dev_acc_str + '\\n')\n\n\t\t\t\t\tprint()\n\t\t\t\t\ttrain_loss = 0\n","sub_path":"seq2seq_attn/run_attn.py","file_name":"run_attn.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"353007564","text":"##===============================================\n## Jiadong Mai (20557203)\n## CS 116 Winter 2018\n## Assignment 05, Question 2\n##===============================================\nimport check\n# Question 2\n# count_digits_acc(str_num, list_is_required, start_num) returns a list of \n# length 10, list_is_required, where the start_numth element of the list \n# contains the number of times that the digit start_num occurs in star_num\n# count_digits_acc: Str (listof Nat) Nat -> (listof Nat)\n# Examples:\n# count_digits_acc('440222', [], 0) => [1, 0, 3, 0, 2, 0, 0, 0, 0, 0]\n# count_digits('973195', [], 0) => [0, 1, 0, 1, 0, 1, 0, 1, 0, 2]\ndef count_digits_acc(str_num, list_is_required, start_num):\n if start_num > 9:\n return 
list_is_required\n else:\n list_is_required.append(str_num.count(str(start_num)))\n return count_digits_acc(str_num, list_is_required, start_num+1)\n\n# count_digits(n) returns the count of each digit in n\n# count_digits: Nat -> (listof Nat)\n# Examples:\n# count_digits(440222) => [1, 0, 3, 0, 2, 0, 0, 0, 0, 0]\n# count_digits(973195) => [0, 1, 0, 1, 0, 1, 0, 1, 0, 2]\ndef count_digits(n):\n string = str(n)\n return count_digits_acc(string, [], 0)\n# Test:\n# Test1: n = 0\ncheck.expect('Q2T1', count_digits(0), [1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n# Test2: every digit from 0-9 appears\ncheck.expect('Q2T2', count_digits(1234567890), [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\ncheck.expect('Q2T3', count_digits(1234567312890), [1, 2, 2, 2, 1, 1, 1, 1, 1, 1])\ncheck.expect('Q2T4', count_digits(12233445567312890), [1, 2, 3, 3, 2, 2, 1, 1, 1, 1])\n# Test3: some digits appear more than 2 times\ncheck.expect('Q2T5', count_digits(1789789789789), [0, 1, 0, 0, 0, 0, 0, 4, 4, 4])\ncheck.expect('Q2T6', count_digits(1578682457), [0, 1, 1, 0, 1, 2, 1, 2, 2, 0])\n# Test4: only one digit appears\ncheck.expect('Q2T7', count_digits(999999), [0, 0, 0, 0, 0, 0, 0, 0, 0, 6])\ncheck.expect('Q2T8', count_digits(111111), [0, 6, 0, 0, 0, 0, 0, 0, 0, 0])\ncheck.expect('Q2T9', count_digits(000000), [1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n","sub_path":"CS116/a05-j4mai/a05-j4mai/a05q2.py","file_name":"a05q2.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"166529691","text":"\nimport flask\nimport datetime\nimport flask.ext.sqlalchemy\nimport flask.ext.restless\n\n\napp = flask.Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/course.db'\ndb = flask.ext.sqlalchemy.SQLAlchemy(app)\n\n\nclass Course(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n code = db.Column(db.Unicode)\n title = db.Column(db.Unicode)\n description = db.Column(db.Unicode)\n department = db.Column(db.Unicode)\n status = db.Column(db.Integer)\n\n\n classes = db.relationship(\"Class\", backref=\"course\")\n\n\nclass Class(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n instructor = db.Column(db.Unicode)\n schedule = db.Column(db.Unicode)\n max_roster_size = db.Column(db.Integer)\n term = db.Column(db.Unicode)\n course_id = db.Column(db.Integer, db.ForeignKey('course.id'))\n\n\n# Creating the database tables.\ndb.create_all()\n\n# Creating the Flask-Restless API manager.\nmanager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)\n\n\nmanager.create_api(Course, methods=['GET', 'PUT', 'POST', 'DELETE'], url_prefix='/api/v0')\nmanager.create_api(Class, methods=['GET', 'PUT', 'POST'], url_prefix='/api/v0')\n\n# start the flask loop\napp.run()\n","sub_path":"app/course-manager.py","file_name":"course-manager.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"206165676","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*- \n# -*- author:北辰屏寒 -*- \n# -*- email:chromecs@qq.com -*-\n\nimport sys\nsys.path.append('../')\nsys.path.append('/spider/news')\n\nimport datetime\nfrom db.dbi import preview_upsert\nfrom sina_news.filter.preview import preview_label\nfrom sina_news.parser.preview_parser import NewsPreview\n\ndef preview_to_mongodb(plate_item):\n plate = plate_item[0]\n url = plate_item[1]\n labels = preview_label(url)\n for label in labels:\n try:\n timestamp = datetime.datetime.now()\n
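 # NewsPreview (imported above) presumably wraps one parsed preview node; the accessor calls below read the url, title, source, publish date and comment count out of it.\n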
 pre_news = NewsPreview(label)\n url = pre_news.detail_url()\n title = pre_news.title()\n source_tmp = pre_news.source()\n if source_tmp != title:\n source = source_tmp\n else:\n source = ''\n pub_date = pre_news.pub_date()\n comment_count = pre_news.comment_count()\n img_preview = pre_news.img_preview()\n if img_preview != '':\n img_preview_count = 4\n else:\n img_preview_count = 0\n preview_upsert(plate, url, title, source, pub_date, comment_count, img_preview, img_preview_count, timestamp)\n\n except:\n pass\n","sub_path":"sina_news/data/get_preview.py","file_name":"get_preview.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"15003285","text":"\"\"\"\nRecalibrating uncertainty estimates.\n\"\"\"\n\nimport numpy as np\nfrom sklearn.isotonic import IsotonicRegression\n\n\ndef get_q_idx(exp_props, q):\n num_pts = exp_props.shape[0]\n target_idx = None\n for idx, x in enumerate(exp_props):\n if idx + 1 == num_pts:\n if round(q, 2) == round(float(exp_props[-1]), 2):\n target_idx = exp_props.shape[0] - 1\n break\n if x <= q < exp_props[idx + 1]:\n target_idx = idx\n break\n if target_idx is None:\n raise ValueError(\"q must be within exp_props\")\n return target_idx\n\n\ndef iso_recal(exp_props, obs_props):\n \"\"\"\n Returns an isotonic regression model that maps from obs_props to exp_props\n \"\"\"\n # Flatten\n exp_props = exp_props.flatten()\n obs_props = obs_props.flatten()\n min_obs = np.min(obs_props)\n max_obs = np.max(obs_props)\n\n iso_model = IsotonicRegression(increasing=True, out_of_bounds=\"clip\")\n # just need observed prop values between 0 and 1\n # problematic if min_obs_p > 0 and max_obs_p < 1\n if not ((min_obs == 0.0) and (max_obs == 1.0)):\n print(\"Obs props not ideal: from {} to {}\".format(min_obs, max_obs))\n\n exp_0_idx = get_q_idx(exp_props, 0.0)\n exp_1_idx = get_q_idx(exp_props, 1.0)\n within_01 = obs_props[exp_0_idx : exp_1_idx + 1]\n\n beg_idx, end_idx = None, None\n # Handle beg_idx\n if exp_0_idx != 0:\n min_obs_below = np.min(obs_props[:exp_0_idx])\n min_obs_within = np.min(within_01)\n if min_obs_below < min_obs_within:\n i = exp_0_idx - 1\n while obs_props[i] > min_obs_below:\n i -= 1\n beg_idx = i\n elif np.sum((within_01 == min_obs_within).astype(float)) > 1:\n # multiple minima in within_01 ==> get last min idx\n i = exp_1_idx - 1\n while obs_props[i] > min_obs_within:\n i -= 1\n beg_idx = i\n elif np.sum((within_01 == min_obs_within).astype(float)) == 1:\n beg_idx = int(np.argmin(within_01) + exp_0_idx)\n else:\n raise RuntimeError((\"Inspect input arrays, \" \"cannot set beginning index.\"))\n else:\n beg_idx = exp_0_idx\n\n # Handle end_idx\n if exp_1_idx < obs_props.shape[0] - 1:\n max_obs_above = np.max(obs_props[exp_1_idx + 1 :])\n max_obs_within = np.max(within_01)\n if max_obs_above > max_obs_within:\n i = exp_1_idx + 1\n while obs_props[i] < max_obs_above:\n i += 1\n end_idx = i + 1\n elif np.sum((within_01 == max_obs_within).astype(float)) > 1:\n # multiple maxima in within_01 ==> get last max idx\n i = beg_idx\n while obs_props[i] < max_obs_within:\n i += 1\n end_idx = i + 1\n elif np.sum((within_01 == max_obs_within).astype(float)) == 1:\n end_idx = int(exp_0_idx + np.argmax(within_01) + 1)\n else:\n raise RuntimeError(\"Inspect input arrays, cannot set ending index\")\n else:\n end_idx = exp_1_idx + 1\n\n if end_idx <= beg_idx:\n raise RuntimeError(\"Ending index before beginning index\")\n\n filtered_obs_props = obs_props[beg_idx:end_idx]\n 
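# exp_props is cropped to the same [beg_idx:end_idx) window next, so the isotonic fit sees aligned (obs, exp) pairs\n 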
filtered_exp_props = exp_props[beg_idx:end_idx]\n\n try:\n iso_model = iso_model.fit(filtered_obs_props, filtered_exp_props)\n except Exception:\n raise RuntimeError(\"Failed to fit isotonic regression model\")\n\n return iso_model\n","sub_path":"uncertainty_toolbox/recalibration.py","file_name":"recalibration.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"49190787","text":"#!/usr/local/bin/python\n# encoding:utf-8\n# =====================================================\n# this part set in cx_rc/vim-rc.d/model\n# created by Chen Xu\n# email: chenxu@mail.ustc.edu.cn\n# copyright cx\n# Darwin Kernel Version 17.5.0: Mon Mar 5 22:24:32 PST 2018; root:xnu-4570.51.1~1/RELEASE_X86_64\n# Last modify: 2018年 4月24日 星期二 05时52分58秒 CST\n# =====================================================\n\nimport ROOT\nimport root_numpy\nimport numpy as np\nimport sys\n\n\ndef writefile(ofname, ifname):\n array = np.load(ifname)\n fout = ROOT.TFile(ofname, \"RECREATE\")\n outtree = root_numpy.array2tree(array)\n outtree.Write()\n fout.Close()\n\n\nif __name__ == '__main__':\n argv: list = sys.argv\n argc: int = len(sys.argv)\n if argc == 1:\n writefile(ofname='mlpout.root', ifname='mlpout.d.npy')\n elif argc == 3:\n writefile(ofname=argv[1], ifname=argv[2])\n else:\n print('not known argv, pls see src')\n\n\n","sub_path":"p3_numpy2root.py","file_name":"p3_numpy2root.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"464757149","text":"__xbotpp_module__ = \"mishimmie\"\n\nimport re\nimport urllib\nfrom lxml import html\nfrom xbotpp import bot\nfrom xbotpp import logging\n\n@bot.signal.on_signal('command::mi')\ndef search(info, args, buf):\n\t\"\"\"\\\n\tCommand to search the Mishimmie for a given search term.\n\n\tConstructs a search URL and feeds it to :py:func:`miscan` to get information on it.\n\t\"\"\"\n\n\tif len(args) >= 1:\n\t\turl = \"\"\n\t\tif re.match(\"id:\", args[0]):\n\t\t\tterms = re.sub('id:', '', args[0])\n\t\t\turl = \"http://shimmie.katawa-shoujo.com/post/view/%s\" % urllib.parse.quote(terms)\n\t\telse:\n\t\t\tterms = ' '.join(args)\n\t\t\turl = \"http://shimmie.katawa-shoujo.com/post/list/%s/1\" % urllib.parse.quote(terms)\n\n\t\tres = miscan(url)\n\t\tif res:\n\t\t\treturn \"Mishimmie: %s // %s\" % (res['desc'], res['url'])\n\t\telse:\n\t\t\treturn \"Mishimmie: No results.\"\n\n\telse:\n\t\treturn \"Usage: %smi -- search the Mishimmie for \" % bot.options.prefix\n\n@bot.signal.on_signal(r'url::shimmie\\.katawa-shoujo\\.com')\ndef scan(url):\n\tt = miscan(url)\n\tif t:\n\t\treturn \"Mishimmie: {}\".format(t['desc'])\n\ndef miscan(url):\n\t\"\"\"\\\n\tMishimmie URL scanning function.\n\n\tGrabs the HTML for the given URL, and scans it.\n\tIn the case of being given a single post URL, returns the tags and the canonical page URL.\n\tIn the case of being given a search page URL, returns the tags and the canonical page URL of the\n\tfirst post on the search page.\n\n\tReturns a dict with 'desc', 'url' entries, or None if no information could be found.\n\n\t:rtype: dict or None\n\t\"\"\"\n\n\tlogging.debug(\"Scanning Mishimmie for info on %s...\" % url)\n\trawres = urllib.request.urlopen(url, timeout=5)\n\tresult = str(rawres.read(), 'utf8')\n\tdoc = html.document_fromstring(result)\n\n\ttry:\n\t\tposturl = \"\"\n\t\tpostdesc = \"\"\n\t\tlogging.debug('URL: %s' % rawres.geturl())\n\n\t\tif 
re.search('/post/view/', rawres.geturl()):\n\t\t\tlogging.debug('On a post page.')\n\t\t\tposturl = rawres.geturl()\n\t\t\tpostdesc = doc.get_element_by_id('imgdata').xpath('form/table/tr/td/input')[0].get('value')\n\t\telse:\n\t\t\tlogging.debug('On a search result page.')\n\t\t\tposturl = \"http://shimmie.katawa-shoujo.com%s\" % doc.find_class('thumb')[0].xpath('a')[0].get('href')\n\t\t\tpostdesc = doc.find_class('thumb')[0].xpath('a/img')[0].get(\"alt\").partition(' // ')[0]\n\n\t\tposturl = re.sub('\\?.*', '', posturl)\n\t\treturn {'desc': postdesc, 'url': posturl}\n\n\texcept IndexError:\n\t\treturn None\n","sub_path":"xbotpp_contrib/mishimmie.py","file_name":"mishimmie.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"76926205","text":"import os\nfrom collections import Counter\n\ndef dict():\n path = 'data/emails/'\n files = os.listdir(path)\n emails = [path + email for email in files]\n words = []\n\n for email in emails:\n f = open(email)\n words += f.read().split(\" \")\n\n for i, word in enumerate(words):\n if not words[i].isalpha():\n words[i] = \"\"\n\n \n dictionary = Counter(words)\n del dictionary[\"\"]\n return dictionary.most_common(2000)\n\ndef dataset(dictionary):\n path = 'data/emails/'\n files = os.listdir(path)\n emails = [path + email for email in files]\n feature_vec = []\n labels = []\n\n for email in emails:\n data = []\n f = open(email)\n words = f.read().split(\" \")\n\n for entry in dictionary:\n data.append(words.count(entry[0]))\n feature_vec.append(data)\n\n if \"ham\" in email:\n labels.append(0)\n if \"spam\" in email:\n labels.append(1)\n\n return feature_vec, labels\n\nd = dict()\nfeatures, labels = dataset(d)\n\n\nprint('Feature vector length: ', len(features))\nprint('Label length: ', len(labels))\n\n\n","sub_path":"spam_filter.py","file_name":"spam_filter.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"125563336","text":"#! 
python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport numpy as np\nfrom datetime import datetime\n\nimport common as cmn\n\n\n\n# Compute points that: ( x^2 + y^2 )^2 = A ( x^2 -y^2 )\n#\n\n\n\nparameters_list = [ '', '', '', '' ]\n\n\n\ndef lemniscata( x, y, A ):\n return ( x**2 + y**2 )** 2 - A * ( x**2 - y**2 )\n\n\ndef resolve( lower, upper, count, A ):\n print( lower, upper, count )\n\n start = datetime.now()\n\n b = np.linspace( lower, upper, num= count )\n x, y = np.meshgrid( b, b )\n\n L = lemniscata( x, y, A )\n\n\n elapsed = datetime.now() - start\n print( \"Elapsed time: \" + str( elapsed ) )\n\n return L\n\n\ndef prepare( params ):\n lower = int( sys.argv[ 1 ] )\n upper = int( sys.argv[ 2 ] )\n N = int( sys.argv[ 3 ] )\n A = float( sys.argv[ 4 ] )\n\n L = resolve( lower, upper, N, A )\n\n cmn.contour( lower, upper, N, L, 'L-3' )\n\n\nif __name__ == \"__main__\":\n print( sys.platform )\n if ( len( sys.argv ) >= 1 + len( parameters_list ) ):\n prepare( sys.argv )\n else:\n cmn.show_help( sys.argv[ 0 ], parameters_list )\n","sub_path":"lemniscata.3.py","file_name":"lemniscata.3.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"38252644","text":"import numpy as np\n\nimport tweak\n\n\nclass FastSwapLocalSearch():\n\n def __init__(self, problem):\n self.problem = problem\n\n def complete_swap_cost(self, solution, r, s):\n if r == s:\n return 0\n fr, fs = solution[r] - 1, solution[s] - 1\n d = self.problem.distance_matrix\n f = self.problem.flow_matrix\n cost = (d[r,r]*(f[fs,fs]-f[fr,fr]) + d[r,s]*(f[fs,fr]-f[fr,fs]) +\n d[s,r]*(f[fr,fs]-f[fs,fr]) + d[s,s]*(f[fr,fr]-f[fs,fs]))\n for k in range(self.problem.instance_size):\n if k == r or k == s:\n continue\n fk = solution[k] - 1\n cost += (d[k,r]*(f[fk,fs]-f[fk,fr]) + d[k,s]*(f[fk,fr]-f[fk,fs]) +\n d[r,k]*(f[fs,fk]-f[fr,fk]) + d[s,k]*(f[fr,fk]-f[fs,fk]))\n self.linear_evaluations += 1\n return cost\n\n def swap_cost(self, solution, u, v):\n r, s = self.last_r, self.last_s\n last_cost = self.swap_cost_matrix[u,v]\n if r != u and r != v and s != u and s != v and last_cost != None:\n d = self.problem.distance_matrix\n f = self.problem.flow_matrix\n fu, fv = solution[u] - 1, solution[v] - 1\n fr, fs = solution[r] - 1, solution[s] - 1\n cost = last_cost + (((d[r,u]-d[r,v])+(d[s,v]-d[s,u])) *\n ((f[fs,fu]-f[fs,fv])+(f[fr,fv]-f[fr,fu])) +\n ((d[u,r]-d[v,r])+(d[v,s]-d[u,s])) *\n ((f[fu,fs]-f[fv,fs])+(f[fv,fr]-f[fu,fr])))\n self.constant_evaluations += 1\n else:\n cost = self.complete_swap_cost(solution, u, v)\n self.swap_cost_matrix[u,v] = cost\n return cost\n\n def run(self, initial_solution, initial_cost=None, max_iter=float('inf')):\n current_solution = []\n self.best_solution = initial_solution\n if initial_cost == None:\n self.best_cost = self.problem.evaluate(initial_solution)\n total_evaluations = 1\n else:\n self.best_cost = initial_cost\n total_evaluations = 0\n size = self.problem.instance_size\n self.swap_cost_matrix = np.full((size, size), None)\n self.last_r, self.last_s = None, None\n self.linear_evaluations = 0\n self.constant_evaluations = 0\n iteration = 0\n while iteration < max_iter:\n best_movement_cost = float('inf')\n for r, s in tweak.random_pairs(self.problem.instance_size):\n if r >= s:\n continue\n movement_cost = self.swap_cost(self.best_solution, r, s)\n if movement_cost < best_movement_cost:\n best_movement_cost = movement_cost\n best_r, best_s = r, s\n if best_movement_cost < 0:\n self.last_r, self.last_s = 
best_r, best_s\n neighbour = tweak.swap(self.best_solution, best_r, best_s)\n self.best_solution = neighbour\n self.best_cost += best_movement_cost\n else:\n break\n iteration += 1\n total_evaluations += (self.linear_evaluations +\n self.constant_evaluations / size) / size\n return tuple(self.best_solution), self.best_cost, int(total_evaluations)\n","sub_path":"generate_ml_database/fast_swap_local_search.py","file_name":"fast_swap_local_search.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"181955004","text":"\nfrom odoo import models, fields, api, exceptions\nimport re\nimport datetime\nimport json\nfrom ..auth import oauth\n\n\nclass StockPinkingType(models.Model):\n _inherit = \"stock.picking.type\"\n\n is_with_guia = fields.Boolean(string=\"Es con Guia de Remision?\", default=False)\n\n\nclass StockSendComprobante(models.Model):\n _inherit = \"stock.picking\"\n\n is_guia_picking = fields.Boolean(string=\"Es guia?\", related='picking_type_id.is_with_guia')\n motivo_traslado = fields.Selection(string=\"Motivo de Traslado\", default=\"01\",\n selection=[('01', 'VENTA'),\n ('14', 'VENTA SUJETA A CONFIRMACION DEL COMPRADOR'),\n ('02', 'COMPRA'),\n ('04', 'TRASLADO ENTRE ESTABLECIMIENTOS DE LA MISMA EMPRESA'),\n ('18', 'TRASLADO EMISOR ITINERANTE CP'),\n ('08', 'IMPORTACION'),\n ('09', 'EXPORTACION'),\n ('19', 'TRASLADO A ZONA PRIMARIA'),\n ('13', 'OTROS')])\n desc_motivo_traslado = fields.Char(string=\"Descripcion Motivo Traslado\", default=\"\")\n ind_trans_program = fields.Boolean(string=\"Transbordo programado?\", default=False)\n peso_total = fields.Float(string=\"Peso total\", digits=(10, 2))\n peso_unidad_medida = fields.Char(string=\"Unidad de medida. 
Catalogo Nro 3\")\n numero_bultos = fields.Integer(string=\"Numero de bultos\", default=1)\n transportes = fields.One2many(\"efact.stock_transporte\", \"picking_id\", string=\"Transporte\")\n salida_ubigeo = fields.Char(string=\"Salida ubigeo\")\n salida_direccion = fields.Char(string=\"Salida direccion\")\n\n entrega_ubigeo = fields.Char(string=\"Entrega Ubigeo\")\n entrega_direccion = fields.Char(string=\"Entrega Direccion\")\n # container and port fields are still missing\n\n estado_envio = fields.Selection(string=\"Estado de envio\",\n default=0,\n selection=[(0, \"Pendiente\"), (1, \"Enviado\"), (2, \"Error\")])\n json_enviado = fields.Text(string=\"Json enviado\")\n xml_generado = fields.Text(string=\"Xml generado\")\n digest_value = fields.Char(string=\"Digest Value\")\n\n def validar_datos_compania(self):\n errors = []\n if not self.company_id.partner_id.vat:\n errors.append(\"* No se tiene configurado el RUC de la empresa emisora\")\n if not self.company_id.partner_id.tipo_documento:\n errors.append(\"* No se tiene configurado el tipo de documento de la empresa emisora\")\n elif self.company_id.partner_id.tipo_documento != '6':\n errors.append(\"* El Tipo de Documento de la empresa emisora debe ser RUC\")\n if not self.company_id.partner_id.zip:\n errors.append(\"* No se encuentra configurado el Ubigeo de la empresa emisora.\")\n if not self.company_id.partner_id.street:\n errors.append(\"* No se encuentra configurado la dirección de la empresa emisora.\")\n if not self.company_id.partner_id.registration_name:\n errors.append(\"* No se encuentra configurado la Razón Social de la empresa emisora.\")\n return errors\n\n @api.multi\n def action_generar_comprobante_json(self):\n if self.estado_envio == 1:\n raise exceptions.UserError(\"Documento ya fue aceptado anteriormente.\")\n\n if not self.name or not re.match('T\\\\d{3}-\\\\d{1,8}', self.name):\n raise exceptions.UserError(\"El codigo no tiene el formato correcto: \" + str(self.name))\n errors = self.validar_datos_compania()\n if len(errors) > 0:\n raise exceptions.UserError(\"Error al validar datos de la compania:\\n\" + '\\n'.join(errors))\n\n serie, correlativo = self.name.split('-')\n company = self.company_id.partner_id\n receptor = self.partner_id\n\n documento = {\n \"serie\": serie,\n \"correlativo\": int(correlativo),\n \"nombreEmisor\": company.name,\n \"tipoDocEmisor\": '6',\n \"numDocEmisor\": company.vat,\n \"tipoDocReceptor\": receptor.tipo_documento,\n \"numDocReceptor\": receptor.vat,\n \"nombreReceptor\": receptor.name,\n \"motivoTraslado\": self.motivo_traslado,\n \"descripcionMotivoTraslado\": self.desc_motivo_traslado,\n \"transbordoProgramado\": self.ind_trans_program,\n \"pesoTotal\": self.peso_total,\n \"pesoUnidadMedida\": self.peso_unidad_medida,\n \"numeroBulltosPallets\": self.numero_bultos,\n \"entregaUbigeo\": self.entrega_ubigeo,\n \"entregaDireccion\": self.entrega_direccion,\n \"salidaUbigeo\": self.salida_ubigeo,\n \"salidaDireccion\": self.salida_direccion,\n }\n transportes = []\n for t in self.transportes:\n transportes.append({\n \"modoTraslado\": t.modoTraslado,\n \"fechaInicioTraslado\": t.fechaInicioTraslado,\n \"tipoDocTransportista\": t.tipoDocTransportista,\n \"numDocTransportista\": t.numDocTransportista,\n \"nombreTransportista\": t.nombreTransportista,\n \"placaVehiculo\": t.placaVehiculo,\n \"tipoDocConductor\": t.tipoDocConductor,\n \"numDocConductor\": t.numDocConductor,\n })\n detalles = []\n for d in self.move_lines:\n detalles.append({\n 'cantidadItem': d.product_uom_qty,\n 'unidadMedidaItem': 
d.product_uom.code,\n 'codItem': str(d.id),\n 'nombreItem': d.name,\n })\n\n data = {\n \"tipoDocumento\": \"09\",\n \"fechaEmision\": datetime.datetime.now().strftime(\"%Y-%m-%d\"),\n \"documento\": documento,\n \"transportes\": transportes,\n \"detalle\": detalles\n }\n self.json_enviado = json.dumps(data, ensure_ascii=False, indent=2)\n\n resp = oauth.enviar_doc_url(\n self.company_id.endpoint,\n data,\n oauth.generate_token_by_company(self.company_id),\n self.company_id.tipo_envio)\n\n resp = resp.json()\n if not resp['success']:\n raise exceptions.UserError(\"Error en la api:\\n\" + json.dumps(resp, ensure_ascii=False, indent=2))\n\n resp = resp['result']\n # print(json.dumps(resp, ensure_ascii=False, indent=2))\n if resp.get(\"success\", False) and resp.get(\"sunat_status\", \"x\") == \"A\":\n self.digest_value = resp[\"digest_value\"]\n self.xml_generado = resp[\"signed_xml\"]\n self.estado_envio = 1\n else:\n self.estado_envio = 2\n if \"errors\" in resp and isinstance(resp['errors'], str):\n msg = resp['errors']\n else:\n msg = json.dumps(resp, ensure_ascii=False, indent=2)\n\n raise exceptions.UserError(msg)\n return True\n\n\nclass StockTransporte(models.Model):\n _name = \"efact.stock_transporte\"\n\n modoTraslado = fields.Selection(string=\"Modalidad de traslado\",\n selection=[(\"01\", \"Publico\"), (\"02\", \"Privado\")])\n fechaInicioTraslado = fields.Date(string=\"Inicio del traslado\")\n tipoDocTransportista = fields.Char(string=\"Transportista>Tipo documento\")\n numDocTransportista = fields.Char(string=\"Transportista>Numero documento\")\n nombreTransportista = fields.Char(string=\"Transportista>Nombre\")\n placaVehiculo = fields.Char(string=\"Placa Vehiculo\")\n tipoDocConductor = fields.Char(\"Conductor>Tipo documento\")\n numDocConductor = fields.Char(\"Conductor>Numero de documento\")\n\n picking_id = fields.Many2one(\"stock.picking\", string=\"Picking\", required=True)\n","sub_path":"addons/efact/models/stock/stock_send_comprobante.py","file_name":"stock_send_comprobante.py","file_ext":"py","file_size_in_byte":7887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"341461450","text":"def reverse_alternate(string: str) -> str:\n    \"\"\"\n    Reverses every other word.\n    \n    Args:\n        string: A string.\n    \n    Returns:\n        A string with every other word reversed. \n    Punctuations are included with the word.\n    \"\"\"\n    sep_string = string.strip().split()\n    res = []\n    for i, word in enumerate(sep_string):\n        if i % 2 == 1:\n            res.append(word[::-1])\n        else:\n            res.append(word)\n    return ' '.join(res)\n","sub_path":"codewars/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"211905598","text":"import re\nimport json\n\nfrom .common import InfoExtractor\n\n\nclass ExfmIE(InfoExtractor):\n IE_NAME = u'exfm'\n IE_DESC = u'ex.fm'\n _VALID_URL = r'(?:http://)?(?:www\\.)?ex\\.fm/song/([^/]+)'\n _SOUNDCLOUD_URL = r'(?:http://)?(?:www\\.)?api\\.soundcloud\\.com/tracks/([^/]+)/stream'\n _TESTS = [\n {\n u'url': u'http://ex.fm/song/eh359',\n u'file': u'44216187.mp3',\n u'md5': u'e45513df5631e6d760970b14cc0c11e7',\n u'info_dict': {\n u\"title\": u\"Test House \\\"Love Is Not Enough\\\" (Extended Mix) DeadJournalist Exclusive\",\n u\"uploader\": u\"deadjournalist\",\n u'upload_date': u'20120424',\n u'description': u'Test House \\\"Love Is Not Enough\\\" (Extended Mix) DeadJournalist Exclusive',\n },\n u'note': u'Soundcloud song',\n u'skip': u'The site is down too often',\n },\n {\n u'url': u'http://ex.fm/song/wddt8',\n u'file': u'wddt8.mp3',\n u'md5': u'966bd70741ac5b8570d8e45bfaed3643',\n u'info_dict': {\n u'title': u'Safe and Sound',\n u'uploader': u'Capital Cities',\n },\n u'skip': u'The site is down too often',\n },\n ]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n song_id = mobj.group(1)\n info_url = \"http://ex.fm/api/v3/song/%s\" %(song_id)\n webpage = self._download_webpage(info_url, song_id)\n info = json.loads(webpage)\n song_url = info['song']['url']\n if re.match(self._SOUNDCLOUD_URL, song_url) is not None:\n self.to_screen('Soundcloud song detected')\n return self.url_result(song_url.replace('/stream',''), 'Soundcloud')\n return [{\n 'id': song_id,\n 'url': song_url,\n 'ext': 'mp3',\n 'title': info['song']['title'],\n 'thumbnail': info['song']['image']['large'],\n 'uploader': info['song']['artist'],\n 'view_count': info['song']['loved_count'],\n }]\n","sub_path":"youtube_dl/extractor/exfm.py","file_name":"exfm.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"393335591","text":"from constants import XP_TABLE\r\nfrom constants import SKILL_NAMES\r\n\r\nclass Skill(object):\r\n def __init__(self, name, rank, level, xp, vlevel = 0, is_gain = False):\r\n self.name = name\r\n self.skill_num = SKILL_NAMES.index(name)\r\n self.rank = rank\r\n self.level = level\r\n self.xp = xp\r\n self.vlevel = vlevel\r\n self.tnl = \"\"\r\n if self.name != \"Overall\" and not is_gain:\r\n self.vlevel = self.calc_vlevel()\r\n self.tnl = self.calc_tnl()\r\n \r\n def calc_vlevel(self):\r\n i = self.level\r\n while XP_TABLE[i] <= self.xp:\r\n i += 1\r\n return i\r\n \r\n def calc_tnl(self):\r\n xp_req = XP_TABLE[self.vlevel]\r\n return xp_req - self.xp\r\n \r\n def get_info(self, info):\r\n if info == \"rank\":\r\n return self.rank\r\n elif info == \"level\":\r\n return self.level\r\n elif info == \"vlevel\":\r\n return self.vlevel\r\n elif info == \"xp\":\r\n return self.xp\r\n elif info == \"tnl\":\r\n return self.tnl\r\n elif info == \"name\":\r\n return self.skill_num\r\n \r\n def __str__(self):\r\n s = \"{}({}) {}, {} xp, ranked {}. 
{} xp to next level.\"\r\n return s.format(self.level, self.vlevel, self.name, self.xp, self.rank, self.tnl)","sub_path":"skill.py","file_name":"skill.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"541518168","text":"\"\"\"Add reviewer role\n\nRevision ID: 90f1af83d9b6\nRevises: 5d619660cfa7\nCreate Date: 2016-05-17 02:16:30.097950\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '90f1af83d9b6'\ndown_revision = '5d619660cfa7'\n\nfrom alembic import op\n\n\ndef upgrade():\n \"\"\"Add reviewer role.\"\"\"\n op.execute(\"INSERT INTO roles (name) VALUES ('reviewer');\")\n\n\ndef downgrade():\n \"\"\"Remove reviewer role and dependencies.\"\"\"\n op.execute(\n \"DELETE FROM roles_users WHERE role_id in (\"\n \"SELECT id FROM roles WHERE name='reviewer');\"\n )\n op.execute(\"DELETE FROM roles WHERE name='reviewer';\")\n","sub_path":"migrations/versions/90f1af83d9b6_add_reviewer_role.py","file_name":"90f1af83d9b6_add_reviewer_role.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"217577695","text":"import tensorflow as tf\nimport numpy as np\nimport scipy.io\nK=4 #Number of users\nN=8 #Number of receiving antenna\nbatch_size=5000 #Define the batch size \nSTEPS=100000 #Number of iteration\nHdata=scipy.io.loadmat('H_8*4')\nH=Hdata['H']\nH=H.astype('float32')\n\n#Define the model of the system\nx=tf.placeholder(tf.float32,shape=(None,K),name=\"transmit\")\ny=tf.placeholder(tf.float32,shape=(None,N),name=\"receiver\")\n\n#Define the parameters of the network\n#layer1\nw10=tf.Variable(tf.random_normal([5*K,5*K],stddev=1,seed=1))\nb10=tf.Variable(tf.zeros([5*K,1]))\nw20=tf.Variable(tf.random_normal([K,5*K],stddev=1,seed=1))\nb20=tf.Variable(tf.zeros([K,1]))\nw30=tf.Variable(tf.random_normal([2*K,5*K],stddev=1,seed=1))\nb30=tf.Variable(tf.zeros([2*K,1]))\n\n#layer2\nw11=tf.Variable(tf.random_normal([5*K,5*K],stddev=1,seed=1))\nb11=tf.Variable(tf.zeros([5*K,1]))\nw21=tf.Variable(tf.random_normal([K,5*K],stddev=1,seed=1))\nb21=tf.Variable(tf.zeros([K,1]))\nw31=tf.Variable(tf.random_normal([2*K,5*K],stddev=1,seed=1))\nb31=tf.Variable(tf.zeros([2*K,1]))\n\n#layer3\nw12=tf.Variable(tf.random_normal([5*K,5*K],stddev=1,seed=1))\nb12=tf.Variable(tf.zeros([5*K,1]))\nw22=tf.Variable(tf.random_normal([K,5*K],stddev=1,seed=1))\nb22=tf.Variable(tf.zeros([K,1]))\nw32=tf.Variable(tf.random_normal([2*K,5*K],stddev=1,seed=1))\nb32=tf.Variable(tf.zeros([2*K,1]))\n\n#layer4\nw13=tf.Variable(tf.random_normal([5*K,5*K],stddev=1,seed=1))\nb13=tf.Variable(tf.zeros([5*K,1]))\nw23=tf.Variable(tf.random_normal([K,5*K],stddev=1,seed=1))\nb23=tf.Variable(tf.zeros([K,1]))\nw33=tf.Variable(tf.random_normal([2*K,5*K],stddev=1,seed=1))\nb33=tf.Variable(tf.zeros([2*K,1]))\n\n#layer5\nw14=tf.Variable(tf.random_normal([5*K,5*K],stddev=1,seed=1))\nb14=tf.Variable(tf.zeros([5*K,1]))\nw24=tf.Variable(tf.random_normal([K,5*K],stddev=1,seed=1))\nb24=tf.Variable(tf.zeros([K,1]))\nw34=tf.Variable(tf.random_normal([2*K,5*K],stddev=1,seed=1))\nb34=tf.Variable(tf.zeros([2*K,1]))\n\n#layer6\nw15=tf.Variable(tf.random_normal([5*K,5*K],stddev=1,seed=1))\nb15=tf.Variable(tf.zeros([5*K,1]))\nw25=tf.Variable(tf.random_normal([K,5*K],stddev=1,seed=1))\nb25=tf.Variable(tf.zeros([K,1]))\nw35=tf.Variable(tf.random_normal([2*K,5*K],stddev=1,seed=1))\nb35=tf.Variable(tf.zeros([2*K,1]))\n\n#layer7\nw16=tf.Variable(tf.random_normal([5*K,5*K],stdd
ev=1,seed=1))\nb16=tf.Variable(tf.zeros([5*K,1]))\nw26=tf.Variable(tf.random_normal([K,5*K],stddev=1,seed=1))\nb26=tf.Variable(tf.zeros([K,1]))\nw36=tf.Variable(tf.random_normal([2*K,5*K],stddev=1,seed=1))\nb36=tf.Variable(tf.zeros([2*K,1]))\n\n#layer8\nw17=tf.Variable(tf.random_normal([5*K,5*K],stddev=1,seed=1))\nb17=tf.Variable(tf.zeros([5*K,1]))\nw27=tf.Variable(tf.random_normal([K,5*K],stddev=1,seed=1))\nb27=tf.Variable(tf.zeros([K,1]))\nw37=tf.Variable(tf.random_normal([2*K,5*K],stddev=1,seed=1))\nb37=tf.Variable(tf.zeros([2*K,1]))\n\n\n\n\n\n#Define the process of the forward propagation \n#layer1\ncombination1=tf.matmul(tf.transpose(H),tf.transpose(y))\ns=tf.shape(combination1)\nv0=tf.zeros([2*K,s[1]])\nx0=tf.zeros([K,s[1]])\nHtrH=tf.matmul(tf.transpose(H),H)\ncombination2=tf.matmul(HtrH,x0)\nconcatenation=tf.concat([combination1,x0,combination2,v0],0,name=\"Concatenate\")\ncal1=tf.matmul(w10,concatenation)+b10\nz0=tf.nn.relu(cal1,name=\"Z0\")\ncal2=tf.matmul(w20,z0)+b20\nt0=0.5\nx1=-1+tf.nn.relu(cal2+t0)/abs(t0)-tf.nn.relu(cal2-t0)/abs(t0)\nv1=tf.matmul(w30,z0)+b30\n\n#layer2\ncombination2_1=tf.matmul(HtrH,x1)\nconcatenation_1=tf.concat([combination1,x1,combination2_1,v1],0,name=\"Concatenate1\")\ncal1_1=tf.matmul(w11,concatenation_1)+b11\nz1=tf.nn.relu(cal1_1,name=\"Z1\")\ncal2_1=tf.matmul(w21,z1)+b21+x1\nt1=0.5\nx2=-1+tf.nn.relu(cal2_1+t1)/abs(t1)-tf.nn.relu(cal2_1-t1)/abs(t1)\nv2=tf.matmul(w31,z1)+b31\n\n#layer3\ncombination2_2=tf.matmul(HtrH,x2)\nconcatenation_2=tf.concat([combination1,x2,combination2_2,v2],0,name=\"Concatenate2\")\ncal1_2=tf.matmul(w12,concatenation_2)+b12\nz2=tf.nn.relu(cal1_2,name=\"Z2\")\ncal2_2=tf.matmul(w22,z2)+b22+x2\nt2=0.5\nx3=-1+tf.nn.relu(cal2_2+t2)/abs(t2)-tf.nn.relu(cal2_2-t2)/abs(t2)\nv3=tf.matmul(w32,z2)+b32\n\n#layer4\ncombination2_3=tf.matmul(HtrH,x3)\nconcatenation_3=tf.concat([combination1,x3,combination2_3,v3],0,name=\"Concatenate3\")\ncal1_3=tf.matmul(w13,concatenation_3)+b13\nz3=tf.nn.relu(cal1_3,name=\"Z3\")\ncal2_3=tf.matmul(w23,z3)+b23+x3\nt3=0.5\nx4=-1+tf.nn.relu(cal2_3+t3)/abs(t3)-tf.nn.relu(cal2_3-t3)/abs(t3)\nv4=tf.matmul(w33,z3)+b33\n\n#layer5\ncombination2_4=tf.matmul(HtrH,x4)\nconcatenation_4=tf.concat([combination1,x4,combination2_4,v4],0,name=\"Concatenate4\")\ncal1_4=tf.matmul(w14,concatenation_4)+b14\nz4=tf.nn.relu(cal1_4,name=\"Z4\")\ncal2_4=tf.matmul(w24,z4)+b24+x4\nt4=0.5\nx5=-1+tf.nn.relu(cal2_4+t4)/abs(t4)-tf.nn.relu(cal2_4-t4)/abs(t4)\nv5=tf.matmul(w34,z4)+b34\n\n#layer6\ncombination2_5=tf.matmul(HtrH,x5)\nconcatenation_5=tf.concat([combination1,x5,combination2_5,v5],0,name=\"Concatenate5\")\ncal1_5=tf.matmul(w15,concatenation_5)+b15\nz5=tf.nn.relu(cal1_5,name=\"Z5\")\ncal2_5=tf.matmul(w25,z5)+b25+x5\nt5=0.5\nx6=-1+tf.nn.relu(cal2_5+t5)/abs(t5)-tf.nn.relu(cal2_5-t5)/abs(t5)\nv6=tf.matmul(w35,z5)+b35\n\n#layer7\ncombination2_6=tf.matmul(HtrH,x6)\nconcatenation_6=tf.concat([combination1,x6,combination2_6,v6],0,name=\"Concatenate6\")\ncal1_6=tf.matmul(w16,concatenation_6)+b16\nz6=tf.nn.relu(cal1_6,name=\"Z6\")\ncal2_6=tf.matmul(w26,z6)+b26+x6\nt6=0.5\nx7=-1+tf.nn.relu(cal2_6+t6)/abs(t6)-tf.nn.relu(cal2_6-t6)/abs(t6)\nv7=tf.matmul(w36,z6)+b36\n\n#layer8\ncombination2_7=tf.matmul(HtrH,x7)\nconcatenation_7=tf.concat([combination1,x7,combination2_7,v7],0,name=\"Concatenate7\")\ncal1_7=tf.matmul(w17,concatenation_7)+b17\nz7=tf.nn.relu(cal1_7,name=\"Z7\")\ncal2_7=tf.matmul(w27,z7)+b27+x7\nt7=0.5\nx8=-1+tf.nn.relu(cal2_7+t7)/abs(t7)-tf.nn.relu(cal2_7-t7)/abs(t7)\nv8=tf.matmul(w37,z7)+b37\n\n\n\n\n\n#Define loss function and 
backpropagation algorithm\nxwave_part1=tf.matrix_inverse(tf.matmul(tf.transpose(H),H))\nxwave_part2=tf.matmul(xwave_part1,tf.transpose(H))\nxwave=tf.matmul(xwave_part2,tf.transpose(y))\n\nlossfunction=tf.log(tf.reduce_sum(tf.squared_difference(tf.transpose(x),x1))/tf.reduce_sum(tf.squared_difference(tf.transpose(x),xwave)))\\\n\t\t\t +tf.log(tf.reduce_sum(tf.squared_difference(tf.transpose(x),x2))/tf.reduce_sum(tf.squared_difference(tf.transpose(x),xwave)))\\\n +tf.log(tf.reduce_sum(tf.squared_difference(tf.transpose(x),x3))/tf.reduce_sum(tf.squared_difference(tf.transpose(x),xwave)))\\\n +tf.log(tf.reduce_sum(tf.squared_difference(tf.transpose(x),x4))/tf.reduce_sum(tf.squared_difference(tf.transpose(x),xwave)))\\\n +tf.log(tf.reduce_sum(tf.squared_difference(tf.transpose(x),x5))/tf.reduce_sum(tf.squared_difference(tf.transpose(x),xwave)))\\\n\t\t\t +tf.log(tf.reduce_sum(tf.squared_difference(tf.transpose(x),x6))/tf.reduce_sum(tf.squared_difference(tf.transpose(x),xwave)))\\\n +tf.log(tf.reduce_sum(tf.squared_difference(tf.transpose(x),x7))/tf.reduce_sum(tf.squared_difference(tf.transpose(x),xwave)))\\\n +tf.log(tf.reduce_sum(tf.squared_difference(tf.transpose(x),x8))/tf.reduce_sum(tf.squared_difference(tf.transpose(x),xwave)))\ntrain_step=tf.train.AdamOptimizer(0.001).minimize(lossfunction)\n\n\n\n\n# define function to generate data\ndef generate_data(batchsize,K,N,H):\n\tsource = np.random.randint(0, 2, (batchsize * K, 1)) # generate 0,1 bits\n\tx_ = -2.0 * source + 1.0 # BPSK modulation\n\tx_ = np.reshape(x_, (batchsize, K))\n\tw = np.zeros((N, batchsize)) # Noise Vector with independent,zero mean Gaussian variables of variance 1\n\tfor j in range(batchsize):\n\t\tSNR = np.random.uniform(8, 13, 1) ##8dB-13dB uniform distribution\n\t\tsigma = np.sqrt(1 / (10 ** (SNR / 10)))\n\t\twpart = sigma * np.random.randn(N)\n\t\tw[:, j] = wpart\n\tx_ = x_.astype('float32')\n\ty_ = np.dot(H, np.transpose(x_)) + w\n\ty_ = np.transpose(y_)\n\treturn source, x_, y_\n\ndef generate_testdata(symbolnum,K,N,H,SNR):\n\tsource = np.random.randint(0, 2, (symbolnum * K, 1)) # generate 0,1 bits\n\tx_ = -2.0 * source + 1.0 # BPSK modulation\n\tx_ = np.reshape(x_, (symbolnum, K))\n\tw = np.zeros((N, symbolnum)) # Noise Vector with independent,zero mean Gaussian variables of variance 1\n\tsigma = np.sqrt(1 / (10 ** (SNR / 10)))\n\tfor j in range(symbolnum):\n\t\twpart = sigma * np.random.randn(N)\n\t\tw[:, j] = wpart\n\tx_ = x_.astype('float32')\n\ty_ = np.dot(H, np.transpose(x_)) + w\n\ty_ = np.transpose(y_)\n\treturn source, x_, y_\n\n\n\n#Create a session to run Tensorflow\nsess=tf.Session()\ninit_op=tf.global_variables_initializer()\nsess.run(init_op)\nfor i in range(STEPS):\n\tsource, x_, y_ = generate_data(batch_size,K,N,H)\n\tsess.run(train_step,\n\t\t\tfeed_dict={x:x_,y:y_}) #train wk bk\n\n\tif i%5000==0:\n\t\ttotal_lossfunction=sess.run(lossfunction,feed_dict={x:x_,y:y_})\n\t\tprint(\"After %d training steps,cross entropy on all data is 
%g\"%(i,total_lossfunction))\n\t\tprint(\"x3:\",sess.run(x3,feed_dict={y:y_}))\n\t\tprint(\"v3:\",sess.run(v3,feed_dict={y:y_}))\n\t\tprint(\"z3:\",sess.run(z3,feed_dict={y:y_}))\n\n\n\nprint(\"w11:\",sess.run(w11))\nprint(\"w21:\",sess.run(w21))\nprint(\"w31:\",sess.run(w31))\nprint(\"b11:\",sess.run(b11))\nprint(\"b21:\",sess.run(b21))\nprint(\"b31:\",sess.run(b31))\n\n\n\ntestsymbolnum=100000\nxkout=np.zeros((K,testsymbolnum))\nxkout2=np.zeros((K,testsymbolnum))\nxkout3=np.zeros((K,testsymbolnum))\nxkout4=np.zeros((K,testsymbolnum))\nxkout5=np.zeros((K,testsymbolnum))\nxkout6=np.zeros((K,testsymbolnum))\nxkout7=np.zeros((K,testsymbolnum))\nxkout8=np.zeros((K,testsymbolnum))\nfor i1 in range (8,14):\n\tsource_test,x_test,y_test=generate_testdata(testsymbolnum, K, N, H, i1)\n\txkout1 = sess.run(x1, feed_dict={y: y_test})\n\txkout2 = sess.run(x2, feed_dict={y: y_test})\n\txkout3 = sess.run(x3, feed_dict={y: y_test})\n\txkout4 = sess.run(x4, feed_dict={y: y_test})\n\txkout5 = sess.run(x5, feed_dict={y: y_test})\n\txkout6 = sess.run(x6, feed_dict={y: y_test})\n\txkout7 = sess.run(x7, feed_dict={y: y_test})\n\txkout8 = sess.run(x8, feed_dict={y: y_test})\n\tsourcetest_mat = np.reshape(source_test, (testsymbolnum, K))\n\tscipy.io.savemat('data/layernum8/SNR%d/source.mat'%i1, {'source': sourcetest_mat})\n\tscipy.io.savemat('data/layernum8/SNR%d/x1.mat'%i1, {'x1': xkout1})\n\tscipy.io.savemat('data/layernum8/SNR%d/x2.mat'%i1, {'x2': xkout2})\n\tscipy.io.savemat('data/layernum8/SNR%d/x3.mat'%i1, {'x3': xkout3})\n\tscipy.io.savemat('data/layernum8/SNR%d/x4.mat'%i1, {'x4': xkout4})\n\tscipy.io.savemat('data/layernum8/SNR%d/x5.mat'%i1, {'x5': xkout5})\n\tscipy.io.savemat('data/layernum8/SNR%d/x6.mat'%i1, {'x6': xkout6})\n\tscipy.io.savemat('data/layernum8/SNR%d/x7.mat'%i1, {'x7': xkout7})\n\tscipy.io.savemat('data/layernum8/SNR%d/x8.mat'%i1, {'x8': xkout8})\n\tscipy.io.savemat('data/layernum8/SNR%d/y.mat'%i1, {'y': y_test})\n\n\n\n\n#test the model one batch\n# xkout=np.zeros((K,batch_size))\n# xkout=sess.run(x1,feed_dict={y:y_})\n# xkout2=np.zeros((K,batch_size))\n# xkout2=sess.run(x2,feed_dict={y:y_})\n# xkout3=np.zeros((K,batch_size))\n# xkout3=sess.run(x3,feed_dict={y:y_})\n# xkout4=np.zeros((K,batch_size))\n# xkout4=sess.run(x4,feed_dict={y:y_})\n# source_mat= np.reshape(source, (batch_size, K))\n# scipy.io.savemat('source.mat',{'source':source_mat})\n# scipy.io.savemat('x1.mat',{'x1':xkout})\n# scipy.io.savemat('x2.mat',{'x2':xkout2})\n# scipy.io.savemat('x3.mat',{'x3':xkout3})\n# scipy.io.savemat('x4.mat',{'x4':xkout4})\n# scipy.io.savemat('y.mat',{'y':y_})\n\n# #test the model\n# testsymbolnum=1000000\n# source_test, x_test, y_test = generate_data(testsymbolnum,K,N,H)\n# xkout=np.zeros((K,testsymbolnum))\n# xkout=sess.run(x1,feed_dict={y:y_test})\n# xkout2=np.zeros((K,testsymbolnum))\n# xkout2=sess.run(x2,feed_dict={y:y_test})\n# xkout3=np.zeros((K,testsymbolnum))\n# xkout3=sess.run(x3,feed_dict={y:y_test})\n# xkout4=np.zeros((K,testsymbolnum))\n# xkout4=sess.run(x4,feed_dict={y:y_test})\n# xkout5=np.zeros((K,testsymbolnum))\n# xkout5=sess.run(x5,feed_dict={y:y_test})\n# xkout6=np.zeros((K,testsymbolnum))\n# xkout6=sess.run(x6,feed_dict={y:y_test})\n# sourcetest_mat= np.reshape(source_test, (testsymbolnum, K))\n# scipy.io.savemat('source.mat',{'source':sourcetest_mat})\n# scipy.io.savemat('x1.mat',{'x1':xkout})\n# scipy.io.savemat('x2.mat',{'x2':xkout2})\n# scipy.io.savemat('x3.mat',{'x3':xkout3})\n# scipy.io.savemat('x4.mat',{'x4':xkout4})\n# scipy.io.savemat('x5.mat',{'x5':xkout5})\n# 
scipy.io.savemat('x6.mat',{'x6':xkout6})\n# scipy.io.savemat('y.mat',{'y':y_test})\n\n\n\n\nsess.close()\nprint(\"end\")","sub_path":"Deep MIMO Detection Code/resnet/mimo_detection_batchtraining.py","file_name":"mimo_detection_batchtraining.py","file_ext":"py","file_size_in_byte":12134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"598041445","text":"'''\n@author:lvming\n@time:2021/6/25\n'''\nfrom web01.selenium11 import DriverKey\n\n'''\n Test cases implemented on top of the keyword-driven wrapper class\n'''\n# Test case 1: log in to the e-commerce shop\nwk = DriverKey('Chrome')\nwk.visit('http://39.98.138.157/shopxo/index.php')\nwk.click('link text','登录')\nwk.input('name','accounts','xuzhu666')\nwk.input('name','pwd','123456')\nwk.click('xpath','//button[text()=\"登录\"]')\nwk.sleep(3)\nwk.quit()\n\n# Test case 2: product search on JD.com\ndk = DriverKey('Chrome')\ndk.visit('http://www.jd.com')\ndk.input('id','key','笔记本')\ndk.click('xpath','//button[@aria-label=\"搜索\"]')\ndk.sleep(3)\ndk.quit()\n","sub_path":"web01/test_key.py","file_name":"test_key.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}{"seq_id":"252801899","text":"\"\"\"\nFilename: plot_ohc_metric_timeseries.py\nAuthor: Damien Irving, irving.damien@gmail.com\nDescription: Plot timeseries of various ocean temperature metrics\n for a single model/experiment/run\n\n\"\"\"\n\n# Import general Python modules\n\nimport sys, os, pdb\nimport argparse\n\nimport matplotlib.pyplot as plt\nimport seaborn\nimport numpy\n\nimport iris\nimport iris.quickplot as qplt\nfrom iris.util import rolling_window\n\n\n# Import my modules\n\ncwd = os.getcwd()\nrepo_dir = '/'\nfor directory in cwd.split('/')[1:]:\n repo_dir = os.path.join(repo_dir, directory)\n if directory == 'ocean-analysis':\n break\n\nmodules_dir = os.path.join(repo_dir, 'modules')\nsys.path.append(modules_dir)\n\ntry:\n import general_io as gio\n import convenient_universal as uconv\nexcept ImportError:\n raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')\n\n\n# Define functions\n\ndef plot_timeseries(cube_dict, user_regions, title, tex_units, ref_region=None):\n \"\"\"Create the timeseries plot.\"\"\"\n\n region_dict = {'globe': ('globe', 'black', '--'),\n 'globe60': ('globe (60S - 60N)', 'black', '-'),\n 'tropics': ('tropics (20S to 20N)', 'purple', '-'),\n 'ne': ('northern extratropics (north of 20N)', 'red', '--'),\n 'ne60': ('northern extratropics (20N - 60N)', 'red', '-'),\n 'nh60': ('northern hemisphere (to 60N)', 'red', '-.'),\n 'se': ('southern extratropics (south of 20S)', 'blue', '--'),\n 'se60': ('southern extratropics (60S - 20S)', 'blue', '-'),\n 'sh60': ('southern hemisphere (to 60S)', 'blue', '-.'),\n 'ose': ('outside southern extratropics (north of 20S)', '#cc0066', '-.'),\n 'ose60': ('outside southern extratropics (20S - 60N)', '#cc0066', '--')}\n\n for region in user_regions:\n name, color, style = region_dict[region]\n cube = cube_dict[name]\n qplt.plot(cube.coord('time'), cube, label=name, color=color, linestyle=style)\n\n plt.legend(loc='best')\n plt.title(title)\n if ref_region:\n ylabel = '%s equivalent ocean heat content (%s)' %(region_dict[ref_region][0], tex_units)\n else:\n ylabel = 'ocean heat content (%s)' %(tex_units)\n plt.ylabel(ylabel)\n plt.xlabel('year')\n\n\ndef set_title(data_dict, inargs, plotnum):\n \"\"\"Set the title for the plot\"\"\"\n\n if inargs.argo:\n title = 'Argo'\n else:\n model, experiment, run = gio.get_cmip5_file_details(data_dict['globe'])\n if 
inargs.experiment:\n experiment = inargs.experiment[plotnum].replace('_',' ')\n if inargs.run:\n run = inargs.run[plotnum]\n \n title = '%s, %s, %s' %(model, experiment, run)\n\n return title\n\n\ndef check_inputs(inargs):\n \"\"\"Check the validity of the input arguments.\"\"\"\n\n assert len(inargs.infiles) <= inargs.nrows * inargs.ncols\n if inargs.experiment:\n assert len(inargs.infiles) == len(inargs.experiment)\n if inargs.run:\n assert len(inargs.infiles) == len(inargs.run)\n\n\ndef main(inargs):\n \"\"\"Run the program.\"\"\"\n\n check_inputs(inargs)\n\n # Read data\n try:\n time_constraint = gio.get_time_constraint(inargs.time)\n except AttributeError:\n time_constraint = iris.Constraint()\n\n metadata_dict = {}\n fig = plt.figure(figsize=inargs.figsize)\n if not inargs.figsize:\n print('figure width: %s' %(str(fig.get_figwidth())))\n print('figure height: %s' %(str(fig.get_figheight())))\n\n for plotnum, infile in enumerate(inargs.infiles):\n\n if not os.path.isfile(infile):\n continue\n\n data_dict = {}\n with iris.FUTURE.context(cell_datetime_objects=True):\n data_dict['globe'] = iris.load_cube(infile, 'ocean heat content globe' & time_constraint)\n data_dict['globe (60S - 60N)'] = iris.load_cube(infile, 'ocean heat content globe60' & time_constraint)\n data_dict['southern extratropics (south of 20S)'] = iris.load_cube(infile, 'ocean heat content southern extratropics' & time_constraint)\n data_dict['northern extratropics (north of 20N)'] = iris.load_cube(infile, 'ocean heat content northern extratropics' & time_constraint)\n data_dict['southern extratropics (60S - 20S)'] = iris.load_cube(infile, 'ocean heat content southern extratropics60' & time_constraint)\n data_dict['northern extratropics (20N - 60N)'] = iris.load_cube(infile, 'ocean heat content northern extratropics60' & time_constraint)\n data_dict['outside southern extratropics (north of 20S)'] = iris.load_cube(infile, 'ocean heat content outside southern extratropics' & time_constraint)\n data_dict['outside southern extratropics (20S - 60N)'] = iris.load_cube(infile, 'ocean heat content outside southern extratropics60' & time_constraint)\n data_dict['southern hemisphere (to 60S)'] = iris.load_cube(infile, 'ocean heat content sh60' & time_constraint)\n data_dict['northern hemisphere (to 60N)'] = iris.load_cube(infile, 'ocean heat content nh60' & time_constraint)\n data_dict['tropics (20S to 20N)'] = iris.load_cube(infile, 'ocean heat content tropics' & time_constraint)\n metadata_dict[infile] = data_dict['globe'].attributes['history']\n\n # Calculate the annual mean timeseries\n for key, value in data_dict.items():\n data_dict[key] = value.rolling_window('time', iris.analysis.MEAN, 12)\n tex_units, exponent = uconv.units_info(str(value.units))\n\n # Generate plot\n title = set_title(data_dict, inargs, plotnum)\n ax = plt.subplot(inargs.nrows, inargs.ncols, plotnum + 1)\n plt.sca(ax)\n plot_timeseries(data_dict, inargs.regions, title, tex_units, ref_region=inargs.ref_region)\n \n # Write output\n plt.tight_layout(pad=0.4, w_pad=2.0, h_pad=2.0)\n plt.savefig(inargs.outfile, bbox_inches='tight')\n gio.write_metadata(inargs.outfile, file_info=metadata_dict)\n\n\nif __name__ == '__main__':\n\n extra_info =\"\"\" \nauthor:\n Damien Irving, irving.damien@gmail.com\n \n\"\"\"\n\n description='Plot timeseries of various ocean temperature metrics'\n parser = argparse.ArgumentParser(description=description,\n epilog=extra_info, \n argument_default=argparse.SUPPRESS,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n 
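# one metric file per grid cell below; a blank entry leaves that panel empty\n 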
parser.add_argument(\"infiles\", type=str, nargs='*', help=\"Input temperature metric files (write blank for empty plots on grid)\")\n parser.add_argument(\"outfile\", type=str, help=\"Output file name\")\n \n parser.add_argument(\"--ref_region\", type=str, default=None, \n help=\"Metrics are scaled to the volume of this region\")\n\n parser.add_argument(\"--time\", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),\n help=\"Time period [default = entire]\")\n\n parser.add_argument(\"--regions\", type=str, nargs='*', default=('globe60', 'ne60', 'tropics', 'se60', 'ose60'), \n help=\"regions to plot\")\n\n parser.add_argument(\"--nrows\", type=int, default=1, \n help=\"number of rows in the entire grid of plots\")\n parser.add_argument(\"--ncols\", type=int, default=1,\n help=\"number of columns in the entire grid of plots\")\n parser.add_argument(\"--figsize\", type=float, default=None, nargs=2, metavar=('WIDTH', 'HEIGHT'),\n help=\"size of the figure (in inches)\")\n parser.add_argument(\"--experiment\", type=str, nargs='*', default=None,\n help=\"overwrite the default experiment in the plot header (write blank for empty plots on grid)\")\n parser.add_argument(\"--run\", type=str, nargs='*', default=None,\n help=\"overwrite the default run in the plot header (write blank for empty plots on grid)\")\n\n\n parser.add_argument(\"--argo\", action=\"store_true\", default=False,\n help=\"switch for indicated an Argo rather than CMIP5 input file [default: False]\")\n\n\n args = parser.parse_args() \n main(args)\n","sub_path":"visualisation/plot_ohc_metric_timeseries.py","file_name":"plot_ohc_metric_timeseries.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"568506587","text":"def find_page_number(pages):\n wrong_pages = []\n last_page = 0\n\n for page_num in pages:\n if page_num == last_page+1:\n last_page+=1\n else:\n wrong_pages.append(page_num)\n return wrong_pages\nprint (find_page_number([4,1,2,3,4,26,5,6]))","sub_path":"pythonCodeWars/disorganisePageList.py","file_name":"disorganisePageList.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"642022807","text":"# -*- coding: utf-8 -*-\n\nfrom dp_tornado.engine.controller import Controller as dpController\n\n\nclass HideController(dpController):\n def post(self, room_no=None):\n if not self.get_argument('uniqid') or not room_no:\n return\n\n session = self.model.bjs.admin.user.session.authorized(self)\n output = self.model.bjs.admin.live.controller_hide(self, session, room_no)\n\n self.finish(output)","sub_path":"controller/admin/vod/hide.py","file_name":"hide.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"618298028","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/workers/tasks.py\n# Compiled at: 2019-05-07 08:43:55\n# Size of source mod 2**32: 5053 bytes\nfrom __future__ import absolute_import, unicode_literals\nfrom django.conf import settings\nfrom celery import Celery\napp = Celery('mendelmd')\napp.config_from_object('django.conf:settings')\napp.autodiscover_tasks(lambda : settings.INSTALLED_APPS)\nfrom tasks.models import Task\nfrom workers.models import Worker\nfrom django.db.models import 
Q\nfrom subprocess import run, check_output\nfrom helpers.scw_wrapper import SCW\nfrom helpers.aws_wrapper import AWS\nfrom settings.models import Provider\n\n@app.task(queue='master')\ndef check_queue():\n    print('Check Queue')\n    max_workers = 50\n    tasks = Task.objects.filter(status='scheduled')\n    workers = Worker.objects.filter(~Q(status='terminated'))\n    n_tasks = len(tasks)\n    n_workers = len(workers)\n    print(n_tasks, n_workers)\n    if n_tasks > n_workers:\n        if n_workers < max_workers:\n            n_workers_to_launch = min(n_tasks, max_workers - n_workers)\n            print('Launch Workers', n_workers_to_launch)\n            for i in range(0, n_workers_to_launch):\n                launch_worker.delay()\n\n    if n_tasks < n_workers:\n        print('Terminate Workers')\n        terminate_workers()\n\n\n@app.task(queue='master')\ndef launch_worker():\n    worker = Worker()\n    worker.name = 'New Worker'\n    worker.status = 'new'\n    worker.save()\n    if settings.DEFAULT_PROVIDER == 'AWS':\n        provider = Provider.objects.filter(name='AWS')[0]\n        print(provider, provider.config)\n        worker_result = AWS().launch(provider.config)\n        worker.provider = 'AWS'\n        worker.type = provider.config['instance_type']\n    else:\n        # assumes the Provider table also has an 'SCW' row mirroring the 'AWS'\n        # one; without a lookup here, 'provider' would be unbound in this branch\n        provider = Provider.objects.filter(name='SCW')[0]\n        worker_result = SCW().launch(provider.config)\n        worker.provider = 'SCW'\n        worker.type = ''\n    worker.ip = worker_result['ip']\n    worker.worker_id = worker_result['id']\n    worker.save()\n    install_worker.delay(worker.id)\n\n\n@app.task(queue='master')\ndef launch_workers(n_workers, type):\n    workers = []\n    for i in range(0, int(n_workers)):\n        worker = Worker()\n        worker.name = 'New Worker'\n        worker.type = type\n        worker.status = 'new'\n        worker.save()\n        workers.append(worker)\n\n    for i, worker in enumerate(workers):\n        print('Launch ', i)\n        worker_result = SCW().launch(type)\n        worker.ip = worker_result['ip']\n        worker.worker_id = worker_result['id']\n        worker.save()\n        install_worker.delay(worker.id)\n\n\n@app.task(queue='master')\ndef terminate_workers():\n    idle_workers = Worker.objects.filter(status='idle')\n    for worker in idle_workers:\n        print('Terminate Worker')\n        AWS().terminate(worker.worker_id)\n        print('Terminate Worker', worker.id)\n        worker.status = 'terminated'\n        worker.save()\n\n\n@app.task(queue='master')\ndef terminate_worker(worker_id):\n    worker = Worker.objects.get(id=worker_id)\n    print('Terminate Worker', worker.id)\n    AWS().terminate(worker.worker_id)\n    worker.status = 'terminated'\n    worker.save()\n\n\n@app.task(queue='master')\ndef install_worker(worker_id):\n    worker = Worker.objects.get(id=worker_id)\n    print('Install Worker', worker.id)\n    if settings.DEFAULT_PROVIDER == 'AWS':\n        AWS().install(worker.ip)\n\n\n@app.task(queue='master')\ndef update_worker(worker_id):\n    worker = Worker.objects.get(id=worker_id)\n    print('Update Worker', worker.id)\n    if settings.DEFAULT_PROVIDER == 'AWS':\n        AWS().update(worker.ip)\n\n\n@app.task(queue='master')\ndef check_workers():\n    workers = Worker.objects.all()\n    for worker in workers:\n        ip = worker.ip\n        command = 'top -bcn1 -w512 | head -n 10'\n        command = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s %s' % (ip, command)\n        output = check_output(command, shell=True)\n        text = output.decode('utf-8').splitlines()\n        process_list_started = False\n        for line in text:\n            if line.startswith('%Cpu(s)'):\n                cpu_usage = line.split()[0]\n            if process_list_started:\n                process = line\n                break\n            if line.startswith(' PID USER'):\n                process_list_started = True\n\n        rows = process.split()\n        current_process = ' '.join(rows[10:])\n        output = '{} {}'.format(cpu_usage, current_process)\n        worker.current_status = output\n        
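# persist the sampled status; the fields parsed from `top` above are\n        # best-effort, since the column layout varies between procps versions\n        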
worker.save()","sub_path":"pycfiles/mendelmd-1.2.4-py3.7/tasks.cpython-37.py","file_name":"tasks.cpython-37.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"87727862","text":"import json\nfrom flask import Flask, render_template, request\n\nfrom config import ES_INDEX\nfrom settings import es\n\n\napplication = Flask(__name__)\n\n\n@application.route('/')\ndef index():\n\tcoords = []\n\treturn render_template(\"welcome.html\",\n coords=json.dumps(coords),\n )\n\n\n@application.route('/category', methods=['GET'])\ndef category():\n\tif request.method == 'GET':\n\t\tcategory = request.args.get('category')\n\t\tes_data = es.search(index=ES_INDEX, body={\"query\": {\"match\": {\"text\": category}}}, size=600)\n\t\tcoords = []\n\t\tfor data in es_data['hits']['hits']:\n\t\t\tif len(data['_source']['coordinates']) > 0:\n\t\t\t\tgeo_data = data['_source']['coordinates'][0]['geometry']['location']\n\t\t\t\tlat = geo_data['lat']\n\t\t\t\tlng = geo_data['lng']\n\t\t\t\tcoords.append([lat, lng])\n\t\treturn render_template(\"twittmap.html\",\n\t coords=json.dumps(coords),\n\t )\n\n\nif __name__ == \"__main__\":\n application.run(debug=True)","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"36306858","text":"from . import main\nfrom flask import render_template, session, redirect, url_for, flash\nfrom datetime import datetime\nfrom forms import NameForm\nfrom app.models import User\nfrom app import db\n\n@main.route('/', methods = ['GET','POST'])\ndef index():\n\tform = NameForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=form.name.data).first()\n\t\tif user is None:\n\t\t\tuser = User(username=form.name.data)\n\t\t\tdb.session.add(user)\n\t\t\t#db.session.commit()\n\t\t\tsession['known'] = False\n\t\telse:\n\t\t\tsession['known'] = True\n\t\tsession['name'] = form.name.data\n\t\tform.name.data = ''\n\t\treturn redirect('/')\n\treturn render_template('index.html', current_time = datetime.utcnow(), form = form, name = session.get('name'), known = session.get('known', False))\n\n@main.route('/user/')\ndef user(name):\n\treturn render_template('user.html', name = name)\n\n@main.route('/life')\ndef life():\n\treturn render_template('life.html')\n\n@main.route('/programming')\ndef programming():\n\treturn render_template('programming.html')","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"342187334","text":"'''\nCreate an inverted index with given documents. Ensure that data does not include punctuation.\n\nExample\nGiven a list of documents with id and content. 
(class Document)\n[\n {\n \"id\": 1,\n \"content\": \"This is the content of document 1 it is very short\"\n },\n {\n \"id\": 2,\n \"content\": \"This is the content of document 2 it is very long bilabial bilabial heheh hahaha ...\"\n },\n]\n\nReturn an inverted index (HashMap with key is the word and value is a list of document ids).\n{\n \"This\": [1, 2],\n \"is\": [1, 2],\n ...\n}\n'''\n\n'''\nDefinition of Document\nclass Document:\n def __init__(self, id, cotent):\n self.id = id\n self.content = content\n'''\n\nclass Solution:\n # @param {Document[]} docs a list of documents\n # @return {dict(string, int[])} an inverted index\n def invertedIndex(self, docs):\n import collections, re\n ans = collections.defaultdict(list)\n\n for doc in docs:\n words = re.split(r'\\s+|[,;.]\\s*', doc.content)\n #words = re.split('\\W+', doc.content) #split by all non-words char, bug on word 'self-motivated'\n words = set(words)\n words.discard('')\n for w in words:\n ans[w].append(doc.id)\n return ans\n","sub_path":"Python/inverted-index.py","file_name":"inverted-index.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"52519168","text":"from tkinter import *\nfrom tkinter import PhotoImage\nimport sqlite3\nfrom tkinter import messagebox\nfrom time import strftime\nfrom datetime import date\nfrom subprocess import call\n\nhj = date.today()\ndias = ('Segunda-feira', 'Terça-feira', 'Quarta-feira', 'Quinta-feira', 'Sexta-feira', 'Sábado', 'Domingo')\nmes = {1:'janeiro', 2:'fevereiro', 3:'março', 4:'abril', 5:'maio', 6:'junho', 7:'julho', 8:'agosto', 9:'setembro', 10:'outubro', 11:'novembro', 12:'dezembro'}\njanela = Tk()\ndia_la = Label(janela,text=(dias[hj.weekday()]+','+ ' '+ str(hj.day)+' '+'de'+' '+str(mes[hj.month])+' '+'de'+' '+str(hj.year)), font='Helvita 50 bold', fg='blue')\ndia_la.place(x=200, y=750)\n\n\nrel = Label(janela,font= 'Helvita 50 bold', fg= 'blue')\nrel.place(x=1325,y=750)\ndef contador(): # funcao contador\n agora = strftime('%H:%M:%S')\n if rel['text'] != agora:\n rel['text'] = agora\n rel.after(100, contador)\ncontador()\n\nwelcome = Label(janela,text=['text'], font= 'Helvita 60 bold', fg= 'blue')\nwelcome.place(x=230, y=200)\ndef upwel():\n agora = strftime('%H:%M:%S')\n if agora <= str(12):\n welcome['text'] = ('Bom Dia !')\n elif agora <= str(18):\n welcome['text'] = ('Boa Tarde !')\n else:\n welcome['text'] = ('Boa Noite !')\n welcome.after(100, upwel)\nupwel()\n\njanela.title('Sistema Salão')\njanela.state('zoomed')\njanela.config()\njanela.geometry('900x600')\n\n\nclass Aplication:\n def __init__(self, master, *args, **kwargs):\n self.master = master\n\n self.frame1 = Frame(master, width=200, height=1500,\n bg='#222125',bd=5, relief='raise')\n self.frame1.pack(side=LEFT)\n\n self.label1 = Label(master, text='Barber Shop', font=('arial', 55, 'bold'))\n self.label1.pack()\n\n\n self.caixa = Button(self.frame1, text='Caixa', fg='#f97303',bg='#505157',\n bd=10, relief='raise', width=12, height=2,\n font=('comic sans ms', 15, 'bold'), command=self.home_caixa ).place(x=10, y=5)\n\n self.agenda = Button(self.frame1, text='Agenda', fg='#f97303',bg='#505157',\n bd=10, relief='raise', width=12, height=2,\n font=('comic sans ms', 15, 'bold'),command=self.agenda ).place(x=10, y=125)\n\n self.cadprod = Button(self.frame1, text='Cadastro\\nProduto', fg='#f97303',bg='#505157',\n bd=10, relief='raise', width=12, height=2,\n font=('comic sans ms',15, 'bold'),command=self.cadprod ).place(x=10, 
y=245)\n\n        self.cadclient = Button(self.frame1, text='Cadastro\\nCliente', fg='#f97303',bg='#505157',\n                           bd=10, relief='raise', width=12, height=2,\n                           font=('comic sans ms', 15, 'bold'),command=self.cadcliente ).place(x=10, y=365)\n\n        self.cadfornec = Button(self.frame1, text='Cadastro\\nFornecedor', fg='#f97303', bg='#505157',\n                            bd=10, relief='raise', width=12, height=2,\n                            font=('comic sans ms', 15, 'bold'),command=self.cadfornecedor ).place(x=10, y=490)\n\n        self.aniversario = Button(self.frame1, text='Aniversários', fg='#f97303', bg='#505157',\n                            bd=10, relief='raise', width=12, height=2,\n                            font=('comic sans ms', 15, 'bold'), ).place(x=10, y=610)\n\n        self.modelo = Button(self.frame1, text='Modelos', fg='#f97303', bg='#505157',\n                            bd=10, relief='raise', width=12, height=2,\n                            font=('comic sans ms', 15, 'bold'), ).place(x=10, y=730)\n\n\n        self.foto = PhotoImage(file='img.gif')\n        self.foto = self.foto.subsample(1, 1)\n        self.label = Label(master, image=self.foto)\n        self.label.pack()\n\n\n    def home_caixa(self):\n        call(['python','home_caixa.py'])\n\n    def agenda(self):\n        call(['python','agendar.py'])\n\n    def cadprod(self):\n        call(['python','cad_prod.py'])\n\n    def cadcliente(self):\n        call(['python','cad_cliente.py'])\n\n    def cadfornecedor(self):\n        call(['python','cad_fornecedor.py'])\n\n\n\napp = Aplication(janela)\njanela.mainloop()\n","sub_path":"main_doc.py","file_name":"main_doc.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"225100134","text":"\n\nfrom xai.brain.wordbase.nouns._smear import _SMEAR\n\n#class header\nclass _SMEARING(_SMEAR, ):\n\tdef __init__(self,): \n\t\t_SMEAR.__init__(self)\n\t\tself.name = \"SMEARING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"smear\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_smearing.py","file_name":"_smearing.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"232795000","text":"# coding: utf-8\n__author__ = \"HanQian\"\n__email__ = \"hanqianops@163.com\"\n\nimport os\nimport platform\nimport re\nimport subprocess\nimport time\nimport traceback\n\nfrom lib.base import PluginInterface,BaseResponse\nfrom lib.execute_cmd import shell\nfrom lib.logger import LoggerHelper\n\nlog = LoggerHelper(__file__)\n\n\nclass LinuxSysInfo(PluginInterface):\n    def __init__(self):\n        self.ret = BaseResponse()\n    def shell(cmd, timeout=None):\n        \"\"\"\n        Execute a command\n        :param timeout: command timeout in seconds\n        :return: list\n        \"\"\"\n        wait = 0\n        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n                             universal_newlines=True, shell=True)\n\n        if timeout:\n            while True:  # check whether the command has finished\n                if p.poll() == 0:\n                    return p.stdout.read()\n                elif wait >= timeout:\n                    p.kill()\n                    return [\"CmdTimeOut: {0}\".format(cmd)]\n                else:\n                    wait += 1\n                    time.sleep(1)\n        return p.stdout.read()\n\n    def result(self):\n        \"\"\"Return the collected information\"\"\"\n        try:\n            self.ret.data=self.process()\n            log.run_log.info(self.ret.data)\n        except Exception as e:\n            msg = traceback.format_exc()\n            log.error_log.error(msg)\n            self.ret.status = False\n            self.ret.error = msg\n        return self.ret\n    \n    def process(self):\n        \"\"\"Process the collected result; subclasses must override this method\"\"\"\n        raise NotImplementedError(\"a process method must be defined\")\n    \n    \nclass OsPlugin(LinuxSysInfo):\n    \"\"\"Operating system information\"\"\"\n    def process(self):\n        filter_keys = [\"Manufacturer\", \"Serial Number\", \"Product Name\", \"UUID\", \"Wake-up Type\"]\n        raw_data = {}\n\n        for key in filter_keys:\n            try:\n                cmd_res = shell(\"dmidecode -t system|grep '%s'\" % key)\n                cmd_res = cmd_res.strip()\n\n                res_to_list = cmd_res.split(\":\")\n                if len(res_to_list) > 1:  # the second one is the wanted string\n                    raw_data[key] = res_to_list[1].strip()\n                else:\n\n                    raw_data[key] = -1\n            except Exception as e:\n                print(e)\n                raw_data[key] = -2  # means cmd went wrong\n\n        data = {\"asset_type\": \"server\"}\n        data[\"manufactory\"] = raw_data[\"Manufacturer\"]\n        data[\"sn\"] = raw_data[\"Serial Number\"]\n        data[\"model\"] = raw_data[\"Product Name\"]\n        data[\"uuid\"] = raw_data[\"UUID\"]\n        data[\"os\"] = platform.platform()\n        return data\n\nclass CpuPlugin(LinuxSysInfo):\n    \"\"\"CPU information\"\"\"\n    def process(self):\n        base_cmd = \"cat /proc/cpuinfo\"\n\n        raw_data = {\n            \"cpu_model\": \"%s |grep 'model name' |head -1 \" % base_cmd,\n            \"cpu_count\": \"%s |grep 'processor'|wc -l\" % base_cmd,\n            \"cpu_core_count\": \"%s |grep 'cpu cores' |awk -F: '{SUM +=$2} END {print SUM}'\" % base_cmd,\n        }\n\n        for k, cmd in raw_data.items():\n            cmd_res = shell(cmd)\n            raw_data[k] = cmd_res.strip()\n\n        data = {\n            \"cpu_count\": raw_data[\"cpu_count\"],\n            \"cpu_core_count\": raw_data[\"cpu_core_count\"]\n        }\n        cpu_model = raw_data[\"cpu_model\"].split(\":\")\n        if len(cpu_model) > 1:\n            data[\"cpu_model\"] = cpu_model[1].strip()\n        else:\n            data[\"cpu_model\"] = -1\n\n        return data\n\nclass NicPlugin(LinuxSysInfo):\n    \"\"\"NIC information\"\"\"\n    def process(self):\n        raw_data = shell(\"ifconfig -a\")\n        raw_data = raw_data.split(\"\\\\n\")\n        nic_dic = {}\n        next_ip_line = False\n        last_mac_addr = None\n        for line in raw_data:\n            if next_ip_line:\n                # print last_mac_addr\n                # print line  #, last_mac_addr.strip()\n                next_ip_line = False\n                nic_name = last_mac_addr.split()[0]\n                mac_addr = last_mac_addr.split(\"HWaddr\")[1].strip()\n                raw_ip_addr = line.split(\"inet addr:\")\n                raw_bcast = line.split(\"Bcast:\")\n                raw_netmask = line.split(\"Mask:\")\n                if len(raw_ip_addr) > 1:  # has addr\n                    ip_addr = raw_ip_addr[1].split()[0]\n                    network = raw_bcast[1].split()[0]\n                    netmask = raw_netmask[1].split()[0]\n                    # print(ip_addr,network,netmask)\n                else:\n                    ip_addr = None\n                    network = None\n                    netmask = None\n                if mac_addr not in nic_dic:\n                    nic_dic[mac_addr] = {\"name\": nic_name,\n                                         \"macaddress\": mac_addr,\n                                         \"netmask\": netmask,\n                                         \"network\": network,\n                                         \"bonding\": 0,\n                                         \"model\": \"unknown\",\n                                         \"ipaddress\": ip_addr,\n                                         }\n                else:  # mac already exists, must be a bonding address\n                    if \"%s_bonding_addr\" % (mac_addr) not in nic_dic:\n                        random_mac_addr = \"%s_bonding_addr\" % (mac_addr)\n                    else:\n                        random_mac_addr = \"%s_bonding_addr2\" % (mac_addr)\n\n                    nic_dic[random_mac_addr] = {\"name\": nic_name,\n                                                \"macaddress\": random_mac_addr,\n                                                \"netmask\": netmask,\n                                                \"network\": network,\n                                                \"bonding\": 1,\n                                                \"model\": \"unknown\",\n                                                \"ipaddress\": ip_addr,\n                                                }\n\n            if \"HWaddr\" in line:\n                # print line\n                next_ip_line = True\n                last_mac_addr = line\n\n        nic_list = []\n        for k, v in nic_dic.items():\n            nic_list.append(v)\n        return nic_list\n\n# output is abnormal; pending fix\nclass MemPlugin(LinuxSysInfo):\n    \"\"\"Memory information\"\"\"\n    def process(self):\n        raw_data = shell(\"dmidecode -t 17\")\n        raw_list = raw_data.split(\"\\\\n\")\n        raw_ram_list = []\n        item_list = []\n        for line in raw_list:\n\n            if line.startswith(\"Memory Device\"):\n                raw_ram_list.append(item_list)\n                item_list = []\n            else:\n                item_list.append(line.strip())\n\n        ram_list = []\n        for item in raw_ram_list:\n            item_ram_size = 0\n            ram_item_to_dic = {}\n            for i in item:\n                # print i\n                data = i.split(\":\")\n                if len(data) == 2:\n                    key, v = data\n\n                    if key == \"Size\":\n                        # print key ,v\n                        if v.strip() != \"No Module Installed\":\n                            ram_item_to_dic[\"capacity\"] = v.split()[0].strip()  # e.g split \"1024 MB\"\n                            item_ram_size = int(v.split()[0])\n                            # print item_ram_size\n                        else:\n                            ram_item_to_dic[\"capacity\"] = 0\n\n                    if key == \"Type\":\n                        ram_item_to_dic[\"model\"] = v.strip()\n                    if key == \"Manufacturer\":\n                        ram_item_to_dic[\"manufactory\"] = v.strip()\n                    if key == \"Serial Number\":\n                        ram_item_to_dic[\"sn\"] = v.strip()\n                    if key == \"Asset Tag\":\n                        ram_item_to_dic[\"asset_tag\"] = v.strip()\n                    if key == \"Locator\":\n                        ram_item_to_dic[\"slot\"] = v.strip()\n\n            if item_ram_size == 0:  # empty slot , need to report this\n                pass\n            else:\n                ram_list.append(ram_item_to_dic)\n\n        raw_total_size = shell(\"cat /proc/meminfo|grep MemTotal \").split(\":\")\n        ram_data = {\"ram\": ram_list}\n        if len(raw_total_size) == 2:  # correct\n\n            total_mb_size = int(raw_total_size[1].split()[0]) / 1024\n            ram_data[\"ram_size\"] = total_mb_size\n\n\n        return ram_data\n\n# output is abnormal; pending fix\nclass DiskPlugin(LinuxSysInfo):\n    \"\"\"Disk information\"\"\"\n    def process(self):\n        data = self.linux()\n        return data\n\n    def linux(self):\n        result = {\"physical_disk_driver\":[]}\n\n        try:\n            script_path = os.path.dirname(os.path.abspath(__file__))\n            shell_command = \"sudo %s/MegaCli -PDList -aALL\" % script_path\n            output = shell(shell_command)\n            result[\"physical_disk_driver\"] = self.parse(output[1])\n        except Exception as e:\n            result[\"error\"] = e\n        return result\n\n    def parse(self,content):\n        \"\"\"\n        Parse the output of the shell command\n        :param content: shell command output\n        :return: the parsed result\n        \"\"\"\n        response = []\n        result = []\n        for row_line in content.split(\"\\\\n\\\\n\\\\n\\\\n\"):\n            result.append(row_line)\n        for item in result:\n            temp_dict = {}\n            for row in item.split(\"\\\\n\"):\n                if not row.strip():\n                    continue\n                if len(row.split(\":\")) != 2:\n                    continue\n                key,value = row.split(\":\")\n                name = self.mega_patter_match(key)\n                if name:\n                    if key == \"Raw Size\":\n                        raw_size = re.search(\"(\\\\d+\\\\.\\\\d+)\",value.strip())\n                        if raw_size:\n\n                            temp_dict[name] = raw_size.group()\n                        else:\n                            raw_size = \"0\"\n                    else:\n                        temp_dict[name] = value.strip()\n\n            if temp_dict:\n                response.append(temp_dict)\n        return response\n\n    def mega_patter_match(self,needle):\n        grep_pattern = {\"Slot\":\"slot\", \"Raw Size\":\"capacity\", \"Inquiry\":\"model\", \"PD Type\":\"iface_type\"}\n        for key,value in grep_pattern.items():\n            if needle.startswith(key):\n                return value\n        return False\n\n\n","sub_path":"src/plugins/Linux.py","file_name":"Linux.py","file_ext":"py","file_size_in_byte":10356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"520577272","text":"# Encryptor GUI\n\nimport tkinter as tk\nfrom Encryptor import Encryptor\n\nclass Application(tk.Frame):\n\n    def __init__(self, master):\n        super(Application, self).__init__(master)\n        self.grid()\n        self.create_widgets()\n\n    def create_widgets(self):\n        tk.Label(self,\n                 text=\"Choose a text file\"\n                 ).grid(row=0, column=0, columnspan=2, sticky=tk.W)\n\n        self.cipher_file = tk.StringVar()\n        self.cipher_file.set(None)\n\n        tk.Radiobutton(self, text=\"cipher1.txt\",\n                       variable=self.cipher_file,\n                       value=\"cipher1.txt\",\n                       command=self.choose_file\n                       ).grid(row=1, column=0, sticky=tk.W)\n        tk.Radiobutton(self, text=\"cipher2.txt\",\n                       variable=self.cipher_file,\n                       value=\"cipher2.txt\",\n                       command=self.choose_file\n                       ).grid(row=1, column=1, sticky=tk.W)\n\n        tk.Label(self,\n                 text=\"Enter the message:\"\n                 ).grid(row=2, column=0, columnspan=2, sticky=tk.W)\n\n        self.msg = tk.Text(self, width=100, height=10, wrap=tk.WORD)\n        self.msg.grid(row=3, 
column=0, columnspan=2, sticky=tk.W)\n\n        tk.Button(self,\n                  text=\"Encrypt\",\n                  command=self.encrypt\n                  ).grid(row=4, column=0, sticky=tk.W)\n\n        tk.Button(self,\n                  text=\"Decrypt\",\n                  command=self.decrypt\n                  ).grid(row=4, column=1, sticky=tk.W)\n\n        tk.Label(self,\n                 text=\"New Message:\"\n                 ).grid(row=5, column=0, columnspan=2, sticky=tk.W)\n\n        self.out_msg = tk.Text(self, width=100, height=10, wrap=tk.WORD)\n        self.out_msg.grid(row=6, column=0, columnspan=2, sticky=tk.W)\n\n    def choose_file(self):\n        self.e = Encryptor(self.cipher_file.get())\n\n    def encrypt(self):\n        message = self.msg.get(0.0, tk.END)\n        encrypted = self.e.encrypt_message(message)\n\n        self.out_msg.delete(0.0, tk.END)\n        self.out_msg.insert(0.0, encrypted)\n\n    def decrypt(self):\n        message = self.msg.get(0.0, tk.END)\n        decrypted = self.e.decrypt_message(message)\n\n        self.out_msg.delete(0.0, tk.END)\n        self.out_msg.insert(0.0, decrypted)\n\nroot = tk.Tk()\nroot.title(\"Encryptor/Decryptor\")\nroot.geometry(\"700x500\")\napp = Application(root)\nroot.mainloop()","sub_path":"Python/GUI/EncryptorGUI.py","file_name":"EncryptorGUI.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"74130019","text":"import pandas\nfrom utils import reports\n\n\ndef compare_by_composer(df, method, filter_by=None):\n    result = {}\n\n    for composer in df.composer.unique():\n        ason = df[df.composer == composer]\n\n        if filter_by:\n            son = ason[ason[method].str.contains(filter_by)]\n        else:\n            son = ason\n\n        result[composer] = son[method].value_counts().to_dict()\n\n    ndf = pandas.DataFrame(result)\n    ndf.index.name = 'Sonority'\n    ndf = ndf.fillna(0)\n\n    # make the column names shorter\n    ndf.columns = [x[0:5] for x in ndf.columns.values]\n\n    for column in ndf.columns.values:\n        ndf[column + '%'] = ndf[column]/ndf[column].sum() * 100\n\n    return ndf.sort('beeth', ascending=False)\n\n\ndef main(df, args):\n    size = len(df.composer.unique())\n\n    nf = compare_by_composer(df, \"normal_form_string\")\n    intervals = compare_by_composer(df, \"intervals_string\")\n    intervals_diss = compare_by_composer(df, \"intervals_string\", filter_by=\"A|d\")\n    super_diss = compare_by_composer(df, \"intervals_string\", filter_by=\"AA|dd\")\n\n    reports.comparison_method(nf, size, \"normal-form\")\n    reports.comparison_method(intervals, size, \"intervals\")\n    reports.comparison_method(intervals_diss, size, \"dissonant-intervals\")\n    reports.comparison_method(super_diss, size, \"super-dissonant-intervals\")\n\n\nopt_map = (('compare-composers', True, True, False, 'Compare sonorities in all composers'),)\n","sub_path":"analysis/compare_composers.py","file_name":"compare_composers.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"85799877","text":"# urban vs rural, k-means\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom scipy.stats import f_oneway\ndata = pd.read_csv('1.csv', sep=';')\n\nprint(data.head())\n# plot the raw data\nplt.scatter(data['aUrban_1'], data['aRural_1'], c = 'b')\nplt.title('Data Urban/Rural')\nplt.xlabel('Urban')\nplt.ylabel('Rural')\nplt.show()\n\nX = data.iloc[:, [0, 3, 4]].values\n# elbow method\nar = []\nfor i in range(1,12):\n    kmeans = KMeans(n_clusters= i, init= 'k-means++', random_state= 42)\n    kmeans.fit(X[:,[1,2]])\n    ar.append(kmeans.inertia_)\nplt.plot(range(1,12), ar)\nplt.title('The Elbow Method')\nplt.xlabel('Number of clusters')\nplt.ylabel('data')\nplt.show()\n# split into clusters\nkmeans = KMeans(n_clusters= 3, init= 'k-means++', random_state= 42)\ny_kmeans = kmeans.fit_predict(X[:,[1,2]])\n# plot the resulting clusters\nplt.scatter(X[y_kmeans == 0, 1], X[y_kmeans == 0,2], s = 100,c = 'b', label = 'The best')\nplt.scatter(X[y_kmeans == 1, 1], X[y_kmeans == 1,2], s = 100,c = 'c', label = 'Worst')\nplt.scatter(X[y_kmeans == 2, 1], X[y_kmeans == 2,2], s = 100,c = 'y', label = 'Average')\nplt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], s=300, c = 'm', label ='centers')\nplt.title('Clusters of countries k-means')\nplt.xlabel('Urban')\nplt.ylabel('Rural')\nplt.legend()\nplt.show()\n# print the lists of countries\nprint('The best:')\nfor i in X[y_kmeans == 0, 0]:\n    print(i)\nprint(\" \")\nprint('Average:')\nfor i in X[y_kmeans == 2, 0]:\n    print(i)\nprint(\" \")\nprint('Worst:')\nfor i in X[y_kmeans == 1, 0]:\n    print(i)\n# one-way ANOVA for each group of countries\nF, p = f_oneway(X[y_kmeans == 0, 1], X[y_kmeans == 0,2])\nprint(\"The Best countries:\")\nprint(np.round(F,2))\nprint(\"p-value \" + str(np.round(p,2)))\nprint(\"Average countries:\")\nF, p = f_oneway(X[y_kmeans == 2, 1], X[y_kmeans == 2,2])\nprint(np.round(F,2))\nprint(\"p-value \" + str(np.round(p,2)))\nprint(\"Worst countries:\")\nF, p = f_oneway(X[y_kmeans == 1, 1], X[y_kmeans == 1,2])\nprint(np.round(F,2))\nprint(\"p-value \" + str(np.round(p,2)))","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"202274749","text":"changes_file = 'changes_python.txt'\ndata = [line.strip() for line in open(changes_file, 'r')]\nsep = 72*'-'\n\n\nclass Commit:\n    'class for commits'\n    \n    def __init__(self, revision=None, author=None, fulldate=None, month=None, week=None, date=None, comment_line_count=None, changes=None,\n                 comment=None, tbldconfig=0, tpixel=0, tgradle=0, tjava=0, txml=0, toth=0, countM=0, countA=0, countD=0):\n        self.revision = revision\n        self.author = author\n        self.fulldate = fulldate\n        self.month = month\n        self.week = week\n        self.date = date\n        self.comment_line_count = comment_line_count\n        self.changes = changes\n        self.comment = comment\n        self.tbldconfig = tbldconfig\n        self.tpixel = tpixel\n        self.tgradle = tgradle\n        self.tjava = tjava\n        self.txml = txml\n        self.toth = toth\n        self.countM = countM\n        self.countA = countA\n        self.countD = countD\n    def get_commit_list(self):\n        import time\n        import datetime\n        changes_file = 'changes_python.txt'\n        my_file = open(changes_file, 'r')\n        data = [line.strip() for line in open(changes_file, 'r')]\n        sep = 72*'-'\n        commits = []\n        index = 0 \n        while True:\n            try:\n                tbldconfig=0\n                tpixel=0\n                tgradle=0\n                tjava=0\n                txml=0\n                toth=0\n                countM = 0\n                countA = 0\n                countD = 0\n                details = data[index + 1].split('|')\n                revision = int(details[0].strip().strip('r'))\n                author = details[1].strip()\n                fulldate = details[2].strip()\n                year = int((details[2][0:5]).strip())\n                month = int((details[2][6:8]).strip())\n                date = int((details[2][9:11]).strip())\n                mydate = datetime.date(year,month, date) #year, month, day\n                week = (mydate.strftime(\"%W\"))\n                comment_line_count = int(details[3].strip().split(' ')[0])\n                changes = data[index+2:data.index('',index+1)]\n                for change in changes:\n                    if \"build-config\" in str(change):\n                        tbldconfig = tbldconfig + 1\n                        # print typebldconfig, revision, change\n                    elif \"dpi\" in str(change):\n                        tpixel = tpixel + 1\n                    
elif \"600dp\" in str(change):\n tpixel = tpixel + 1\n elif \"gradle\" in str(change):\n tgradle = tgradle + 1\n elif \"java\" in str(change):\n tjava = tjava + 1\n elif \"xml\" in str(change):\n txml = txml + 1\n else:\n toth = toth+1\n for change in changes:\n if change[0] == \"M\":\n countM = countM+1\n elif change[0] == \"A\":\n countA = countA+1\n elif change[0] == \"D\":\n countD = countD +1\n index = data.index(sep, index + 1)\n comment = data[index-comment_line_count:index]\n # print type(comment)\n # The object which contains the conveniently misspelt word \"Foother\" can be used for testing\n # It has two lines of comments so can check that both are captured\n # It contains three references to 'Modify' changes so this can also be checked\n # Contains two paths in the changes section that relate to xml type and one that relates to java\n # if \"Foother\" in str(comment):\n # print comment\n # print typexml\n # print typejava\n # print countA\n # print countM\n # print countD\n # break\n w = (author,countD)\n commits.append(w) \n commits.sort(key=lambda s:(s[0],s[1]),reverse=True)\n except IndexError:\n break\n # print \"finished\"\n f1 = open(\"testingtesting8.csv\", \"w\")\n for c in commits:\n #print c[1]\n #f1.write(c[0]+\",\"+str(c[1]))\n f1.write(c[0]+\",\"+str(c[1])+'\\n')\n f1.close\n\n\ncommits = Commit()\ncommits.get_commit_list()\n\n\n#########################################################\n\n\n\n \n","sub_path":"14-processchanges/process_changes8.py","file_name":"process_changes8.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"172974409","text":"from rivr.http import Http404\n\nclass Array(object):\n def __init__(self, *views):\n self.views = views\n \n def __call__(self, request):\n for view in self.views:\n try:\n response = view(request)\n if response:\n return response\n except Http404:\n continue\n \n raise Http404\n","sub_path":"rivr/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"202981541","text":"import numpy as np\nimport helper.helper as h\nfrom scipy import linalg\nfrom collections import OrderedDict\n\ndef main():\n fA = np.array(h.Helper.getMatrix('matrix.dat'), dtype = np.float32)\n dA = np.array(h.Helper.getMatrix('matrix.dat'), dtype = np.float64)\n fP = np.array([1.0000000, 0.0156250, 0.0000000], dtype = np.float32)\n dP = np.array([1.0000000, 0.0156250, 0.0000000], dtype = np.float64)\n fEps = np.finfo(np.float32).eps\n dEps = np.finfo(np.float64).eps\n\n with open('output.dat', 'a') as output:\n print('\\n--------------------Metadata---------------------', file=output)\n for k, v in h.Helper.getMeta('Kirill', 'Telegin', '3430302/80004').items():\n print(k, v, file=output)\n print(\"\\nMatrix\", file=output)\n print(fA, file=output)\n print('\\n----------------Single precision------------------', file=output)\n for i in range(len(fP)):\n print(\"====================================================\", file=output)\n for k, v in getDesicion(fA,fP[i],fEps).items():\n print(k, v, file=output)\n print(\"====================================================\", file=output)\n\n with open('output.dat', 'a') as output:\n print('\\n-----------Double precision------------', file=output)\n for i in range(len(dP)):\n print(\"====================================================\", file=output)\n for k, v in 
getDesicion(dA,dP[i],dEps).items():\n print(k, v, file=output)\n print(\"====================================================\", file=output)\n\n\ndef ortVector(array):\n with open('output.dat', 'a') as output:\n print(\"\\nОртогональность\", file=output)\n bufOrtVect = np.zeros(shape=(len(array),len(array)))\n for i in range(len(array)):\n for j in range(len(array)):\n if(i == j):\n continue\n else:\n bufOrtVect[i][j] = np.dot(array[i], array[j])\n print(bufOrtVect, file=output)\n\ndef indexPerfomance(A, eigVect, eigValue, eps):\n bufIP = np.zeros(shape=(len(eigValue)))\n for i in range(len(eigValue)):\n bufIP[i] = np.linalg.norm(np.dot(A,eigVect[i]) - eigValue[i]*eigVect[i])/(len(eigVect)*eps*np.linalg.norm(A)*np.linalg.norm(eigVect))\n return max(bufIP)\n\ndef vectN(A,B):\n bufR = np.zeros(shape=(len(B[1]),len(B[1])))\n i = -1\n for row in B[1]:\n i += 1\n bufR[i] = np.dot(B[0],row) - np.dot(A,row)\n return bufR\n\ndef getDesicion(A, P, eps):\n A = h.Helper.paramAddArray(P, A, 0, 0)\n B = np.linalg.eig(A)\n allValue = OrderedDict({\n 'Current parameter value\\n': P,\n 'Index perfomance\\n': indexPerfomance(A,B[1],B[0], eps),\n 'Eigenvalues\\n': B[0],\n 'Eigenvectors\\n': B[1],\n 'Redisual vectors\\n': vectN(A,B),\n })\n return allValue\n\nmain()\n","sub_path":"6lab/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"419852011","text":"\r\nfrom instrumental import instrument, list_instruments\r\n#from instrumental.drivers.cameras import uc480\r\nfrom instrumental.drivers.cameras import uc480\r\nimport numpy as np\r\nimport time as Time\r\nimport matplotlib.pyplot as plt\r\nimport xarray as xr\r\nimport os\r\nimport time as Time\r\nfrom PyQt5 import QtCore\r\n\r\n\r\n''' \r\n ---------------------------------------------> X (1280)\r\n | \r\n | ______Width________\r\n | | |\r\n | | |\r\n | High |\r\n | | |\r\n | |___________________| \r\n |\r\n\r\n Y\r\n\r\n (1024)\r\n\r\n'''\r\n\r\n\r\nclass ThorlabsCamer():\r\n def __init__(self):\r\n\r\n self.insts = list_instruments()\r\n print(self.insts)\r\n \r\n\r\n def ConnectCamera(self):\r\n self.cam = instrument(self.insts[0])\r\n print(self.cam)\r\n\r\n\r\n\r\n def SetCamera(self, yshift = 0, xshift = 0, hight = 1024, width = 1280, exposeTime = \"0.01[ms]\"):\r\n self.yshift = yshift\r\n self.xshift = xshift\r\n\r\n self.hight = hight\r\n self.width = width\r\n\r\n self.exposure_time = str(exposeTime/1000.0) + \"[ms]\"\r\n\r\n print(\"(yshift , xshift , hight, width) is: \", yshift, xshift, hight, width)\r\n print(\"Camera Set !\")\r\n\r\n\r\n def SingleImageData(self, infoObjSingle):\r\n try:\r\n self.cam.start_capture(left = self.xshift, right = self.xshift + self.width, top = self.yshift,\r\n bot = self.yshift + self.hight, exposure_time = self.exposure_time)\r\n image = self.cam.get_captured_image(timeout='1s', copy=True)\r\n except Exception:\r\n print(\"ERROR OCCURE !\")\r\n infoObjSingle.append(\"ERROR OCCURE !\")\r\n\r\n else:\r\n return image\r\n\r\n\r\n def MultiImageData(self, infoObj, frame_number_expected = 100, segment_frame = 50):\r\n\r\n t0 = Time.time()\r\n\r\n for j in range(int(frame_number_expected / segment_frame)):\r\n # Initialized the image data for each segment_fame\r\n self.data = self.SingleImageData(infoObj)\r\n print(\"The {}th segment\".format(j))\r\n\r\n for i in range(segment_frame - 1): # because that initial data is not an empty space.\r\n\r\n data_temp = 
self.SingleImageData(infoObj)\r\n\r\n # Time.sleep(0.05)\r\n self.data = np.append(self.data, data_temp, axis=0)\r\n\r\n print(\"the data shape is:\", self.data.shape)\r\n infoObj.setTextColor(QtCore.Qt.green)\r\n infoObj.append(\"the data shape is:\" + str(self.data.shape))\r\n np.save('camera_{}_{}'.format(frame_number_expected, j), self.data.astype(np.uint8))\r\n del self.data\r\n \r\n infoObj.append(\"Time consumed to save the data is:\" + str(Time.time() - t0) )\r\n\r\n print(\"Time consumed to save the data is:\", Time.time() - t0)\r\n\r\n\r\n\r\nclass ReadData():\r\n\r\n def __init__(self, noteObj, frameNumber, segmentFrame, width, hight, fileName = \"NoGlass\"):\r\n\r\n self.frameNumber = frameNumber\r\n self.segmentFrame = segmentFrame\r\n self.width = width\r\n self.hight = hight\r\n self.fileName = fileName\r\n\r\n self.image = np.load('camera_{}_0.npy'.format(self.frameNumber))\r\n print(\"The segment data shape is: \", self.image.shape)\r\n self.noteObj = noteObj\r\n #self.noteObj.appendPlainText(\"the data has saved as .nc file! \")\r\n\r\n\r\n def ImageData(self):\r\n\r\n for j in range(1, int(int(self.frameNumber / self.segmentFrame))):\r\n temp_data = np.load('camera_{}_{}.npy'.format(self.frameNumber, j))\r\n # print(temp_data.shape)\r\n self.image = np.append(self.image, temp_data, axis=0)\r\n\r\n self.image = np.reshape(self.image, [self.frameNumber, self.hight, self.width])\r\n\r\n print(\"The dataForSave shape is: \", self.image.shape)\r\n\r\n #exposure_time = self.expose_spinbox.value()\r\n exposureTime = 1\r\n\r\n ## note that here we have change the data type uint8 --> int16\r\n \r\n ds = xr.Dataset({'CameraMatrix': (['frameNumber', 'hight', 'width'], self.image.astype(np.int16))},\r\n attrs={'frameNumber': self.frameNumber, \r\n 'width':self.width,\r\n 'hight':self.hight,\r\n 'exposure_time': exposureTime, \r\n \"note\":self.noteObj.toPlainText()}\r\n )\r\n\r\n\r\n print(self.noteObj.toPlainText())\r\n \r\n return ds \r\n \r\n #ds.to_netcdf(self.fileName + '.nc')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n cam = ThorlabsCamer()\r\n cam.ConnectCamera()\r\n cam.SetCamera()\r\n data = cam.SingleImageData()\r\n cam.MultiImageData()\r\n\r\n plt.subplot(111)\r\n plt.imshow(data)\r\n plt.colorbar()\r\n\r\n #plt.savefig('oneframe.eps', format='eps', dpi=300)\r\n\r\n plt.show()","sub_path":"ThorlabsCamera.py","file_name":"ThorlabsCamera.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"625925258","text":"import requests\nimport json\n\nimport sasoptpy as so\nfrom sasoptpy.api import api\nso.reset_globals()\n\ndef test(cashost, port):\n\n # Start server\n api.start(thread=True, host='127.0.0.1', port=5000)\n\n host = 'http://127.0.0.1:5000'\n\n # Get server and version info\n res = requests.get(host)\n\n # Create new workspace\n res = requests.post(host + '/workspaces',\n data={'name': 'myworkspace',\n 'password': 12345})\n\n # If workspace exists, renew the token\n res = requests.post(host + '/workspaces/myworkspace',\n data={'password': 12345})\n\n # Save the token\n token = 'Bearer ' + res.json()['token']\n headers = {'Authorization': token}\n\n # Clean workspace\n res = requests.post(host, data={'action': 'clean'}, headers=headers)\n\n # Create a new CAS session\n res = requests.post(host + '/sessions', headers=headers,\n data={'name': 'mycas', 'host': cashost, 'port': port})\n\n # Create a new model\n res = requests.post(host + '/models', headers=headers,\n data={'name': 
'knapsack', 'session': 'mycas'})\n\n # Create variables\n res = requests.post(host + '/models/knapsack/variable_groups', headers=headers,\n json={'name': 'pick', 'index': [[\"pen\",\"watch\",\"cup\"]], 'vartype': 'integer'})\n\n # Set objective function\n res = requests.post(host + '/models/knapsack/objectives', headers=headers,\n json={'expression': \"5*pick['pen']+20*pick['watch']+2*pick['cup']\", 'sense': 'maximize',\n 'name': 'total_value'})\n\n # Capacity constraint\n res = requests.post(host + '/models/knapsack/constraints', headers=headers,\n json={'expression': \"1*pick['pen']+3*pick['watch']+10*pick['cup']<=22\", 'name': 'total_weight'})\n\n # Individual limits for items\n res = requests.post(host + '/models/knapsack/constraint_groups', headers=headers,\n json={\n 'expression': 'pick[i]<=5', 'index': \"for i in ['pen','watch','cup']\",\n 'name': 'bounds'})\n\n # Get optmodel code of the model\n res = requests.get(host+'/models/knapsack', headers=headers,\n params={'format': 'optmodel'})\n print(res.json()['optmodel'])\n\n # Solve the model\n res = requests.post(host + '/models/knapsack/solutions', headers=headers,\n data={'stream': False})\n sols = res.json()['solutions']\n for i in sols:\n print(i, sols[i])\n","sub_path":"examples/rest_knapsack.py","file_name":"rest_knapsack.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"179134173","text":"def countChar (sentence, char):\n\tcount = 0\n\tfor i in range (len(sentence)-1):\n\t\tif (sentence[i] == char):\n\t\t\tif(sentence[i+1] == char):\n\t\t\t\tcount += 1\n\treturn count\n\nfrase = \"\"\n\nwhile (True):\n\tfrase = input(\"Digite uma frase contendo palavras separadas por um unico espaco em branco:\")\n\n\tif(countChar(frase, \" \") == 0):\n\t\tbreak\n\telse:\n\t\tprint(\"Frase com mais um espaco seguido.\")\n\nwhile (True):\n\tletra = input(\"Digite uma letra a sua escolha para o programa exibir quantas vezes esta letra ocorre na frase: (Digite @ para encerrar a execucao:)\")\n\tif (letra == \"@\"):\n\t\tbreak\n\tcount = 0\n\tfor i in range (len(frase)-1):\n\t\tif (frase[i]==letra):\n\t\t\tcount += 1\n\tprint(\"A letra aparece\", count, \"vezes na frase.\")","sub_path":"2019/1/ICC/python/string/exercicio1.py","file_name":"exercicio1.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"180703729","text":"import urllib.request\nimport json\n\nuser_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'\n\n \ndef get_quote_json():\n url = 'http://quotesondesign.com/wp-json/posts?filter[orderby]=rand&filter[posts_per_page]=1'\n headers = {'User-Agent': user_agent}\n req = urllib.request.Request(url, headers=headers)\n with urllib.request.urlopen(req) as response:\n json_response = json.loads(response.read().decode('utf-8').replace(\"'\",'\"'))\n return json_response","sub_path":"app/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"465126757","text":"\"\"\"\nTODO:\n- compute number of parameters\n- implement a global pooling layer (as in GoogLeNet / ResNet)\n- several models\nDONE\n- cumulated computing time (by epochs) instead of just one number\n- batch normalization: https://keras.io/layers/normalization/\n\n\"\"\"\nimport time\nimport struct\nimport functools\n\nimport numpy as np\nimport matplotlib . 
pyplot as pyplot\n\nimport tensorflow as tf\n\n# constants\nmnist_image_shape = (28, 28, 1)\nmnist_input_size = functools . reduce (lambda a, b : a * b, mnist_image_shape)\nmnist_output_size = 10\n\nmode = \"simple\"\n\n# input params\nnb_epochs = 40\n\nsimple_convolution_layers_params = [\n {\n \"nb filters\" : 32,\n \"filter size\" : (3, 3),\n \"strides\" : (1, 1),\n \"batch normalization\" : False,\n \"activation\" : \"relu\",\n \"max pooling size\" : (2, 2),\n \"dropout rate\" : 0.25,\n },\n {\n \"nb filters\" : 128,\n \"filter size\" : (3, 3),\n \"strides\" : (1, 1),\n \"batch normalization\" : True,\n \"activation\" : \"relu\",\n \"max pooling size\" : (2, 2),\n \"dropout rate\" : 0.25,\n },\n]\nsimple_dense_layers_params = [\n {\n \"size\" : 70,\n \"activation\" : \"relu\",\n },\n {\n \"size\" : 58,\n \"activation\" : \"relu\",\n },\n]\n\nsimple_training_params = {\n \"nb epochs\" : nb_epochs,\n \"batch size\" : 64,\n}\n\n# kaggle example\n\nnb_epochs = 40\nkaggle_convolution_layers_params = [\n {\n \"nb filters\" : 24,\n \"filter size\" : (5, 5),\n \"strides\" : (1, 1),\n \"batch normalization\" : True,\n \"activation\" : \"relu\",\n \"max pooling size\" : (2, 2),\n \"dropout rate\" : 0.25,\n },\n {\n \"nb filters\" : 48,\n \"filter size\" : (5, 5),\n \"strides\" : (1, 1),\n \"batch normalization\" : True,\n \"activation\" : \"relu\",\n \"max pooling size\" : (2, 2),\n \"dropout rate\" : 0.25,\n },\n]\nkaggle_dense_layers_params = [\n {\n \"size\" : 256,\n \"activation\" : \"relu\",\n },\n]\n\nkaggle_training_params = {\n \"nb epochs\" : nb_epochs,\n \"batch size\" : 64,\n}\n\n# test mode\n\ntest_convolution_layers_params = [\n {\n \"nb filters\" : 10,\n \"filter size\" : (3, 3),\n \"strides\" : (1, 1),\n \"activation\" : \"relu\",\n \"batch normalization\" : False,\n \"max pooling size\" : (2, 2),\n \"dropout rate\" : 0.25,\n },\n {\n \"nb filters\" : 10,\n \"filter size\" : (3, 3),\n \"strides\" : (1, 1),\n \"activation\" : \"relu\",\n \"batch normalization\" : True,\n \"max pooling size\" : (2, 2),\n \"dropout rate\" : 0.25,\n },\n]\ntest_dense_layers_params = [\n {\n \"size\" : 70,\n \"activation\" : \"relu\",\n },\n {\n \"size\" : 58,\n \"activation\" : \"relu\",\n },\n]\ntest_training_params = {\n \"nb epochs\" : 2,\n \"batch size\" : 64,\n}\n\n\n# functions\n\n\ndef compute_number_of_parameters (model_params):\n # note: biases\n nb_params = 0\n data_shape = mnist_image_shape\n for conv_params in model_params [\"conv layers\"]:\n nb_conv_params = conv_params [\"nb filters\"] * (conv_params [\"filter size\"] [0] * conv_params [\"filter size\"] [0] * data_shape [2] + 1) # + 1 bias\n nb_params += nb_conv_params\n if (conv_params [\"batch normalization\"]):\n nb_params += conv_params [\"nb filters\"] * 4\n data_shape = [ (data_shape [i] - conv_params [\"filter size\"] [i] + 1) // conv_params [\"max pooling size\"] [i] for i in range (2) ] + [ conv_params [\"nb filters\"], ]\n prev_size = data_shape [0] * data_shape [1] * data_shape [2]\n for dense_params in model_params [\"dense layers\"]:\n nb_dense_params = dense_params [\"size\"] * (prev_size + 1)\n nb_params += nb_dense_params\n prev_size = dense_params [\"size\"]\n nb_dense_params = 10 * (prev_size + 1)\n nb_params += nb_dense_params\n return nb_params\n \n\ndef build_conv_block (model, conv_block_params, input_shape = None):\n\n if (input_shape is None):\n model . add (tf . keras . layers . 
Conv2D (conv_block_params [\"nb filters\"], conv_block_params [\"filter size\"], activation = conv_block_params [\"activation\"], data_format = \"channels_last\"))\n else:\n model . add (tf . keras . layers . Conv2D (conv_block_params [\"nb filters\"], conv_block_params [\"filter size\"], activation = conv_block_params [\"activation\"], input_shape = input_shape, data_format = \"channels_last\"))\n\n try:\n if (conv_block_params [\"batch normalization\"]):\n model . add (tf . keras . layers . BatchNormalization (axis = 3)) # channels last\n except (KeyError):\n # no batch normalization\n pass\n\n try:\n model . add (tf . keras . layers . MaxPooling2D (pool_size = conv_block_params [\"max pooling size\"]))\n except (KeyError):\n # no max pooling\n pass\n\n try:\n model . add (tf . keras . layers . Dropout (conv_block_params [\"dropout rate\"]))\n except (KeyError):\n # no dropout\n pass\n\n\n\ndef build_dense_layer (model, dense_layer_params):\n model . add (tf . keras . layers . Dense (dense_layer_params [\"size\"], activation = dense_layer_params [\"activation\"]))\n\n\ndef build_model (model_params):\n model = tf . keras . Sequential ()\n conv_layers_params = model_params [\"conv layers\"]\n build_conv_block (model, conv_layers_params [0], input_shape = model_params [\"input shape\"])\n for conv_block_params in conv_layers_params [ 1 : ]:\n build_conv_block (model, conv_block_params)\n model . add (tf . keras . layers . Flatten ())\n for dense_layer_params in model_params [\"dense layers\"]:\n build_dense_layer (model, dense_layer_params)\n model . add (tf . keras . layers . Dense (mnist_output_size, activation = \"softmax\"))\n return model\n\n\n\ndef read_the_mnist_data (data_set_name):\n # http://yann.lecun.com/exdb/mnist/ (idx format)\n # https://stackoverflow.com/questions/39969045/parsing-yann-lecuns-mnist-idx-file-format\n # (in particular: https://stackoverflow.com/a/53181925/2148753)\n data_set_dir = \"../mnist/\"\n images_file_name = data_set_dir + data_set_name + \"-images-idx3-ubyte\"\n labels_file_name = data_set_dir + data_set_name + \"-labels-idx1-ubyte\"\n with open (images_file_name, \"rb\") as images_file:\n magic, nb_images = struct . unpack (\">II\", images_file . read (8))\n if (magic != 2051):\n raise Exception (\"wrong file\")\n nb_rows, nb_cols = struct . unpack (\">II\", images_file . read (8))\n image_shape = (nb_rows, nb_cols, 1) # channels last\n images = np . fromfile (images_file, dtype = np . dtype (np . uint8) . newbyteorder (\">\")) . astype (np . float32) / 255.\n images = images . reshape ((nb_images, ) + image_shape)\n with open (labels_file_name, \"rb\") as labels_file:\n magic, nb_labels = struct . unpack (\">II\", labels_file . read (8))\n if (magic != 2049):\n raise Exception (\"wrong file\")\n if (nb_labels != nb_images):\n raise Exception (\"nbr of labels is not equal to number of images\")\n labels = np . fromfile (labels_file, dtype = np . dtype (np . uint8) . newbyteorder (\">\"))\n labels = labels . reshape ((nb_images, ))\n return nb_images, images, labels\n\n\n\"\"\"\nhttps://stackoverflow.com/questions/43178668/record-the-computation-time-for-each-epoch-in-keras-during-model-fit\n(Marcin Możejko)\n\"\"\"\nclass TimeAndEvaluationCallback (tf . keras . callbacks . Callback):\n\n def on_train_begin (self, logs = {}):\n self . training_times = []\n self . test_loss = []\n self . test_acc = []\n\n def on_epoch_begin (self, epoch, logs = {}):\n self . epoch_start_time = time . time ()\n\n def on_epoch_end (self, epoch, logs = {}):\n self . 
training_times . append (time . time () - self . epoch_start_time)\n loss, acc = self . model . evaluate (test_images, test_labels, batch_size = self . params [\"batch_size\"])\n self . test_loss . append (loss)\n self . test_acc . append (acc)\n\n\n# derived parameters \n\nif (mode == \"test\"):\n convolution_layers_params = test_convolution_layers_params\n dense_layers_params = test_dense_layers_params\n training_params = test_training_params\nelif (mode == \"simple\"):\n convolution_layers_params = simple_convolution_layers_params\n dense_layers_params = simple_dense_layers_params\n training_params = simple_training_params\nelif (mode == \"kaggle\"):\n convolution_layers_params = kaggle_convolution_layers_params\n dense_layers_params = kaggle_dense_layers_params\n training_params = kaggle_training_params\n\nmodel_params = {\n \"input shape\" : mnist_image_shape,\n \"conv layers\" : convolution_layers_params,\n \"dense layers\" : dense_layers_params,\n}\n\nnb_convolution_layers = len (model_params [\"conv layers\"])\nif (nb_convolution_layers == 0):\n raise Exception (\"no conv?\")\nnb_hidden_dense_layers = len (model_params [\"dense layers\"])\n\n\n# main\n\nnb_train_images, train_images, train_labels = read_the_mnist_data (\"train\")\nnb_test_images, test_images, test_labels = read_the_mnist_data (\"t10k\")\nprint (\"Nbr train images: \" + str (nb_train_images))\nprint (\"Nbr test images: \" + str (nb_test_images))\n\n#print (train_images [0] . shape)\n\n\n\nmodel = build_model (model_params)\n\noptimizer = tf . keras . optimizers . SGD (lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True)\nmodel . compile (loss = \"sparse_categorical_crossentropy\", optimizer = optimizer, metrics = [ \"accuracy\", ])\n\nnb_params = compute_number_of_parameters (model_params)\nprint (\"computed nb params=\" + str (nb_params))\n\n\ntiming_and_evaluation_callback = TimeAndEvaluationCallback ()\ntrain_history = model . fit (train_images, train_labels, epochs = training_params [\"nb epochs\"], batch_size = training_params [\"batch size\"], callbacks = [ timing_and_evaluation_callback, ])\n\ntotal_training_time = sum (timing_and_evaluation_callback . training_times)\ncumulated_training_times = [ sum (timing_and_evaluation_callback . training_times [ : e + 1]) for e in range (training_params [\"nb epochs\"]) ]\ntest_loss_history = timing_and_evaluation_callback . test_loss\ntest_accuracy_history = timing_and_evaluation_callback . test_acc\n\n\nxlist = np . arange (training_params [\"nb epochs\"])\n\npyplot . plot (xlist, test_accuracy_history)\npyplot . plot (xlist, train_history . history [\"acc\"])\npyplot . legend ([\"Train acc\", \"Test acc\"])\npyplot . title (\"Accuracy\")\npyplot . xlabel (\"Epoch\")\npyplot . show ()\n\npyplot . plot (xlist, test_loss_history)\npyplot . plot (xlist, train_history . history [\"loss\"])\npyplot . legend ([\"Train loss\", \"Test loss\"])\npyplot . title (\"Loss\")\npyplot . xlabel (\"Epoch\")\npyplot . show ()\n\n\npyplot . plot (xlist, cumulated_training_times)\npyplot . title (\"Training time\")\npyplot . xlabel (\"Epoch\")\npyplot . 
show ()\n\nprint (total_training_time)\n\n\n","sub_path":"answers/3/mnist-cnn.py","file_name":"mnist-cnn.py","file_ext":"py","file_size_in_byte":10116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"122619134","text":"from pwn import *\n\np = process(\"./nonono\")\n#p = remote(\"edu-ctf.csie.org\", 10178)\nl = ELF(\"./libc.so.6\")\n# use gdb, find the remain addr - libc addr\noffset = 0x00007f2ba5b9eca0 - 0x00007f2ba57b3000\n\ndef add(size, note, IDX):\n\tp.sendlineafter('>>', '1')\n\tp.sendlineafter('IDX : ', str(IDX))\n\tp.sendlineafter('SIZE : ', str(size))\n\tp.sendlineafter('CONTENT: ', note)\n\ndef show(index):\n\tp.sendlineafter('>> ', '2')\n\tp.sendlineafter('IDX :', str(index))\n\ndef delete(index):\n\tp.sendlineafter('>> ', '3')\n\tp.sendlineafter('IDX : ', str(index))\n\n# 0x410 for tcache unsorted bin\nadd( 0x410, 'leak', 0)\n# prevent unsorted bin to be merged to Top\nadd( 0x20 , 'a', 1)\ndelete(0)\npause() # time to find offset\n\nshow(0)\np.recvline()\nl.address = u64( p.recv(6) + '\\0\\0' ) - offset\nsuccess( 'libc -> %s' % hex(l.address))\n\ndelete(1)\ndelete(1)\nprint(hex(l.sym.__free_hook))\npause()\nadd( 0x20, p64( l.sym.__free_hook ))\nadd( 0x20, 'a')\n# 0x4f322 is one_gadget\nadd( 0x20, p64( l.address + 0x4f322))\n\n# double free to trigger crash and libc will call __free_hook\ndelete(3)\n\np.sendline(\"id\")\nprint(p.recvline())\np.interactive()\n","sub_path":"PWN/AIS3/nonono/broken_exp.py","file_name":"broken_exp.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"639002601","text":"\"\"\"Adds encoded_at timestamp\n\nRevision ID: e62d063f87e7\nRevises: 11ddcccec497\nCreate Date: 2019-09-13 00:46:42.945184\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e62d063f87e7'\ndown_revision = '11ddcccec497'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('videos', sa.Column('encoded_at', sa.DateTime(timezone=True), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('videos', 'encoded_at')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/e62d063f87e7_adds_encoded_at_timestamp.py","file_name":"e62d063f87e7_adds_encoded_at_timestamp.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"472621370","text":"__author__ = 'Leo Lourenco'\n\n\"\"\"\n    Write a program that reads two numbers.\n    Print the result of dividing the first by the second, as well as the remainder of the division.\n    Use only the addition and subtraction operators to compute the result.\n    Example: 20 / 4 = 5, because 20 - 4 - 4 - 4 - 4 - 4 = 0 after 5 subtractions.\n\"\"\"\n\nprint(\"***Division***\")\nnum1 = int(input(\"Enter the first number: \"))\nnum2 = int(input(\"Enter the second number: \"))\n\nquotient = 0\nremainder = 0\n\n# repeated subtraction, e.g. 20 / 4: 20-4=16, 16-4=12, 12-4=8, 8-4=4, 4-4=0 -> quotient 5, remainder 0\nif num2 != 0:\n    while num1 >= num2:\n        num1 = num1 - num2\n        quotient = quotient + 1\n    remainder = num1\n    print(\"The quotient is: %i\" % quotient)\n    print(\"The remainder is: %i\" % remainder)\nelse:\n    print(\"Cannot divide by zero.\")","sub_path":"Cap5/Exercicio/exerc5.9.py","file_name":"exerc5.9.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"489626849","text":"from unittest import TestCase\nimport requests\nimport json\n\nfrom app import HOST, PORT\n\nURL_ADDRESS = f\"http://{HOST}:{PORT}\"\n\n\nclass ViewCurrenciesApiTesting(TestCase):\n    header = {'content-type': 'application/json'}\n\n    def test_disable_all_currencies_from_api(self):\n        # Title: Disable all currencies from the API endpoint.\n        # step 1: read all symbols from the database into a list\n        # step 2: loop over the list; for every available symbol, POST available=False\n        # Expected result: a symbol that was available is no longer listed by the endpoint.\n\n        from currency_exchange.blueprints.database.read import reading_all_symbols_from_table_exchange_rate\n        all_symbols = reading_all_symbols_from_table_exchange_rate()\n\n        for symbol in all_symbols:\n            if symbol.available is True:\n                url = f\"{URL_ADDRESS}/currencies\"\n                data_content = {\"symbol\": symbol.symbol, \"available\": False}\n                requests.post(url, data=json.dumps(data_content), headers=self.header)\n            req = requests.get(f\"{URL_ADDRESS}/currencies\")\n            req_json = req.json()\n            self.assertNotIn(symbol.symbol, req_json)\n\n    def test_enable_all_currencies_from_api(self):\n        # Title: Enable all currencies from the API endpoint.\n        # step 1: read all symbols from the database into a list\n        # step 2: loop over the list; for every unavailable symbol, POST available=True\n        # Expected result: a symbol that was unavailable is listed by the endpoint again.\n\n        from currency_exchange.blueprints.database.read import reading_all_symbols_from_table_exchange_rate\n        all_symbols_from_database = reading_all_symbols_from_table_exchange_rate()\n\n        for symbol in all_symbols_from_database:\n            if symbol.available is False:\n                url = f\"{URL_ADDRESS}/currencies\"\n                data_content = {\"symbol\": symbol.symbol, \"available\": True}\n                requests.post(url, data=json.dumps(data_content), headers=self.header)\n                req = requests.get(f\"{URL_ADDRESS}/currencies\")\n                req_json = req.json()\n                self.assertIn(symbol.symbol, req_json)\n\n    def test_add_new_currency(self):\n        # Title: Add a new currency through the API endpoint\n        # step 1: enable all currencies\n        # step 2: POST to /currencies a symbol and rate that are not yet in the database\n        # step 3: the endpoint must respond with the right value\n        
# step 4: verify that the currency added in step 2 appears in the /currencies endpoint\n        # Expected result: the endpoint adds the currency successfully.\n\n        from currency_exchange.blueprints.utils.randomReturn import randomic_letters_uppercase\n        from currency_exchange.blueprints.utils.randomReturn import random_float_number\n\n        self.test_enable_all_currencies_from_api()\n        url = f\"{URL_ADDRESS}/currencies\"\n        symbol_random = randomic_letters_uppercase(10)\n        rate_random = random_float_number()\n        data_content = {\"symbol\": symbol_random, \"rate\": rate_random}\n        requests.post(url, data=json.dumps(data_content), headers=self.header)\n        r_get = requests.get(url, data=json.dumps(data_content), headers=self.header)\n        json_request = r_get.json()\n        self.assertIn(symbol_random, json_request)\n\n    def test_delete_specific_currency(self):\n        # Title: DELETE a currency through the API endpoint and verify it is unavailable\n        # step 1: enable all currencies in the database\n        # step 2: send a DELETE to the /currencies endpoint with a valid symbol\n        # Expected result: the currency must no longer be listed by the currencies endpoint.\n\n        self.test_enable_all_currencies_from_api()\n        url = f\"{URL_ADDRESS}/currencies\"\n        data_content = {\"symbol\": \"USD\"}\n        r = requests.delete(url, data=json.dumps(data_content), headers=self.header)\n        # self.assertEqual(r.status_code, 200)\n        # req = requests.get(f\"{URL_ADDRESS}/currencies\")\n        # req_json = req.json()\n        # self.assertNotIn(\"USD\", req_json)\n\n    def test_disable_currency_verify_all_currencies(self):\n        # Title: Disable a currency via POST and validate that it is unavailable.\n        # step 1: enable all currencies\n        # step 2: POST to the currencies API endpoint a valid symbol with available=False\n        # step 3: the symbol must disappear from the listing\n        # Expected result: the symbol is removed from the listing successfully.\n\n        self.test_enable_all_currencies_from_api()\n        url = f\"{URL_ADDRESS}/currencies\"\n        data_content = {\"symbol\": \"BRL\", \"available\": False}\n        r = requests.post(url, data=json.dumps(data_content), headers=self.header)\n        self.assertEqual(first=r.status_code, second=200)\n        req = requests.get(f\"{URL_ADDRESS}/currencies\").json()\n        self.assertNotIn(\"BRL\", req)\n\n    def test_disable_from_post_currency_try_convert(self):\n        # Title: Disable a currency\n        # step 1: enable all currencies from the API\n        # step 2: POST to the API endpoint a symbol with available = False\n        # step 3: verify the status code is 200, which indicates that the currency was disabled\n        # Expected result: the endpoint must return the 200 status code.\n\n        self.test_enable_all_currencies_from_api()\n        url = f\"{URL_ADDRESS}/currencies\"\n        data_content = {\"symbol\": \"BRL\", \"available\": False}\n        r = requests.post(url, data=json.dumps(data_content), headers=self.header)\n        self.assertEqual(first=r.status_code, second=200)\n\n    def test_delete_all_currencies(self):\n        # Title: Delete all currencies through the API currencies endpoint\n        # step 1: enable all currencies from the API endpoint\n        # step 2: read all symbols from the database\n        # step 3: delete each available symbol through the endpoint with the DELETE method\n        # Expected result: every symbol must be deleted successfully.\n\n        self.test_enable_all_currencies_from_api()\n        from currency_exchange.blueprints.database.read import reading_all_symbols_from_table_exchange_rate\n        all_symbols = reading_all_symbols_from_table_exchange_rate()\n\n        for symbol in all_symbols:\n            if symbol.available:\n                data_content = {\"symbol\": symbol.symbol}\n                url = f\"{URL_ADDRESS}/currencies\"\n
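                # note: sending a JSON body with DELETE is unusual; some servers and proxies drop\n                # DELETE bodies, which would also affect test_delete_specific_currency above\n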
                requests.delete(url, data=json.dumps(data_content), headers=self.header)\n            req = requests.get(f\"{URL_ADDRESS}/currencies\")\n            req_json = req.json()\n            self.assertNotIn(symbol.symbol, req_json)\n\n    def test_delete_currency_without_symbol(self):\n        # Title: Delete a currency without including the symbol in the body\n        # step 1: enable all currencies from the API\n        # step 2: try to delete a currency without giving a symbol in the JSON body\n        # Expected result: it should return status code 409 and not delete any currency.\n\n        self.test_enable_all_currencies_from_api()\n        url = f\"{URL_ADDRESS}/currencies\"\n        data_content = {\"available\": True}\n        delete_method = requests.delete(url, data=json.dumps(data_content), headers=self.header)\n        self.assertEqual(delete_method.status_code, 409)\n\n    def test_insert_new_currency_without_rate(self):\n        # Title: Try to insert a new currency without giving the rate in the body\n        # step 1: POST to the currencies API endpoint without the rate, only with the symbol\n        # step 2: verify the status code and check that it returns 409\n        # Expected result: the status code must be 409.\n\n        url = f\"{URL_ADDRESS}/currencies\"\n        data_content = {\"symbol\": \"JJJJJJJJJ\"}\n        post_method = requests.post(url, data=json.dumps(data_content), headers=self.header)\n        self.assertEqual(post_method.status_code, 409)\n\n    def test_insert_new_currency_without_symbol(self):\n        # Title: Try to insert a new currency without the symbol in the JSON body\n        # step 1: POST to the currencies API endpoint without the symbol, only with the rate\n        # step 2: verify the status code and check that it returns 409\n        # Expected result: the status code must be 409.\n\n        url = f\"{URL_ADDRESS}/currencies\"\n        data_content = {\"rate\": 1.1}\n        post_method = requests.post(url, data=json.dumps(data_content), headers=self.header)\n        self.assertEqual(post_method.status_code, 409)\n","sub_path":"currency_exchange/tests/unit/view/test_currencies_api.py","file_name":"test_currencies_api.py","file_ext":"py","file_size_in_byte":8469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"82130642","text":"import os\n\nFILE_PATH = 'static/mymusic'\n\nclass music_file:\n    def __init__(self, r, fn, mid=0):\n        self.mid = mid # music_id\n        self.r = unicode(r, 'utf-8') # root\n        self.fn = unicode(fn, 'utf-8') # file_name\n        self.url = self.r + u'/' + self.fn\n\nclass music_files:\n    def __init__(self):\n        self.files = []\n        self.file_id = 0\n        for mf in find_all_files(FILE_PATH):\n            mf.mid = self.file_id\n            self.files.insert(mf.mid, mf)\n            self.file_id = self.file_id + 1\n\n    def insert(self, i, mf):\n        self.files.insert(i, mf)\n\n    def get_files(self):\n        return self.files\n\ndef find_all_files(directory):\n    # walk the directory that was passed in (not the module-level FILE_PATH)\n    for root, dirs, files in os.walk(directory):\n        for f in files:\n            mf = music_file(root, f)\n            # yield os.path.join(root, file)\n            yield mf\n","sub_path":"module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"556960361","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport feature_selection\nfrom Preprocessing import preprocessing_datetime, preposses_encoding\n\n\n# In[3]:\n\n\nfinal_features = feature_selection.selected_features\n\n\n# In[5]:\n\n\ndf = pd.read_excel(r\"data\\\\Data_Train.xlsx\")\n\n\n# In[8]:\n\n\ndf = preprocessing_datetime(df)\ndf = preposses_encoding(df)\n\n\n
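# note: the cell below fills missing values in the selected features with 0; per-column median\n# imputation could be a reasonable alternative for fare data\n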
# In[13]:\n\n\nX = df[final_features]\nX = X.fillna(0)\n\n\n# In[14]:\n\n\nY = df[\"Price\"]\n\n\n# In[15]:\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42)\n\n\n# In[16]:\n\n\nfrom sklearn.ensemble import RandomForestRegressor\nreg_rf = RandomForestRegressor()\nreg_rf.fit(X_train, y_train)\n\n\n# In[17]:\n\n\ny_pred = reg_rf.predict(X_test)\n\n\n# In[18]:\n\n\nreg_rf.score(X_train, y_train)\n\n\n# In[19]:\n\n\nreg_rf.score(X_test, y_test)\n\n\n# In[25]:\n\n\nplt.figure(figsize=(12,10))\nsns.scatterplot(x=y_test, y=y_pred)\n\n\n# In[26]:\n\n\nfrom sklearn import metrics\n\n\n# In[27]:\n\n\nmetrics.r2_score(y_test, y_pred)\n\n\n# In[28]:\n\n\n# Hyperparameter optimisation\n\n\n# In[29]:\n\n\nfrom sklearn.model_selection import RandomizedSearchCV\n\n\n# In[31]:\n\n\n# Number of trees in random forest\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# Number of features to consider at every split\nmax_features = ['auto', 'sqrt']\n# Maximum number of levels in tree\nmax_depth = [int(x) for x in np.linspace(10, 110, num = 11)]\nmax_depth.append(None)\n# Minimum number of samples required to split a node\nmin_samples_split = [2, 5, 10]\n# Minimum number of samples required at each leaf node\nmin_samples_leaf = [1, 2, 4]\n# Method of selecting samples for training each tree\nbootstrap = [True, False]\n# Create the random grid\nrandom_grid = {'n_estimators': n_estimators,\n               'max_features': max_features,\n               'max_depth': max_depth,\n               'min_samples_split': min_samples_split,\n               'min_samples_leaf': min_samples_leaf,\n               'bootstrap': bootstrap}\nprint(random_grid)\n\n\n# In[33]:\n\n\n# Use the random grid to search for the best hyperparameters\n# First create the base model to tune\nrf = RandomForestRegressor()\n# Random search of parameters, using 3-fold cross validation;\n# search across 100 different combinations, and use all available cores\nrf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1)\n# Fit the random search model\nrf_random.fit(X_train, y_train)\n\n\n# In[34]:\n\n\nrf_random.best_params_\n\n\n# In[35]:\n\n\nprediction = rf_random.predict(X_test)\n\n\n# In[36]:\n\n\nplt.figure(figsize=(12,10))\nsns.scatterplot(x=y_test, y=prediction)\n\n\n# In[37]:\n\n\nprint('MAE:', metrics.mean_absolute_error(y_test, prediction))\nprint('MSE:', metrics.mean_squared_error(y_test, prediction))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))\n\n\n# In[38]:\n\n\n# dump the fitted model for later use\n\n\n# In[39]:\n\n\nimport pickle\n\n\n# In[41]:\n\n\npickle.dump(rf_random, open('model.pkl','wb'))\n\n\n# In[42]:\n\n\nmodel = open('model.pkl','rb')\n\n\n# In[44]:\n\n\nrf = pickle.load(model)\n\n\n# In[45]:\n\n\npred = rf.predict(X_test)\n\n\n# In[46]:\n\n\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, pred)))\n\n\n# In[48]:\n\n\n\n\n\n# In[50]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"652917507","text":"# danso=95.5\n# year = 2017\n# tocdo_tang = 1 #%\n# while danso<120:\n# \tyear += 1\n# \tdanso += danso*tocdo_tang/100\n# print(year,danso)\n#======================================================\nM = 100 # initial balance\nr = 10  # interest rate, % per year, compounded monthly\nm = 10  # extra amount deposited each month\n
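# each month the balance earns one month of interest and then receives the fixed deposit:\n#   M <- M * (1 + r / (12 * 100)) + m\n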
t = 0\n\n# after how many months does the balance reach 500 or more?\nwhile M < 500:\n\tt += 1\n\tM += M*r/12/100 + m\nprint(t, M)","sub_path":"example/modul1_python/day2_3/day3_4.py","file_name":"day3_4.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"473835083","text":"# encoding=utf-8\n# @Time : 17-3-3\n# @File : common.py\n# @Author : jian\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport os\nfrom ..utils.serialize import loads,dumps\nfrom ..utils import logger\nimport zmq\nimport uuid\nimport json\nimport sys\nimport tarfile\nimport tempfile\nimport re\nimport requests\nfrom antgo.ant import flags\nfrom antgo.utils.fs import *\nfrom antgo import config\nfrom antgo.ant.utils import *\nimport yaml\nfrom antgo.utils.utils import *\nfrom datetime import datetime\nfrom antgo.ant.subgradientrpc import *\nfrom antgo.ant.mltalkerrpc import *\nfrom antgo.ant.warehouse import *\nfrom qiniu import Auth, put_file, etag, urlsafe_base64_encode\nif sys.version > '3':\n  PY3 = True\nelse:\n  PY3 = False\n\nFLAGS = flags.AntFLAGS\nConfig = config.AntConfig\n\n\nclass UnlabeledDataset(Dataset):\n  def __init__(self, dataset):\n    super(UnlabeledDataset, self).__init__()\n    self.dataset_proxy = dataset\n\n  def data_pool(self):\n    for a, b in self.dataset_proxy.unlabeled():\n      yield a, b\n\n  @property\n  def size(self):\n    return self.dataset_proxy.unlabeled_size()\n\nclass AntBase(object):\n  def __init__(self, ant_name, ant_context=None, ant_token=None, **kwargs):\n    self.server_ip = getattr(Config, 'server_ip', 'www.mltalker.com')\n    self.http_port = getattr(Config, 'server_port', '8999')\n    self.http_prefix = 'http'\n    self.ant_name = ant_name\n    self.app_token = os.environ.get('APP_TOKEN', ant_token)\n    self.app_connect = os.environ.get('APP_CONNECT', 'tcp://%s:%s' % (self.server_ip, '2345'))\n    self.app_file_connect = os.environ.get('APP_FILE_CONNECT', 'tcp://%s:%s' % (self.server_ip, '2346'))\n\n    self.subgradientserver = getattr(Config, 'subgradientserver', {})\n\n    # three key info\n    if 'main_file' in kwargs:\n      self.main_file = kwargs['main_file']\n    if 'main_folder' in kwargs:\n      self.main_folder = kwargs['main_folder']\n    if 'main_param' in kwargs:\n      self.main_param = kwargs['main_param']\n    if 'time_stamp' in kwargs:\n      self._time_stamp = kwargs['time_stamp']\n    else:\n      self._time_stamp = timestamp()\n\n    self._proxy = None\n    if 'proxy' in kwargs:\n      self._proxy = kwargs['proxy']\n\n    self._signature = None\n    if 'signature' in kwargs:\n      self._signature = kwargs['signature']\n\n    # current pid\n    self._pid = str(os.getpid())\n    \n    # config zmq connect\n    self._zmq_socket = zmq.Context().socket(zmq.REQ)\n    self._zmq_socket.connect(self.app_connect)\n    \n    # config zmq file connect\n    self._zmq_file_socket = zmq.Context().socket(zmq.DEALER)\n    self._zmq_file_socket.connect(self.app_file_connect)\n    \n    # server flag\n    self.app_server = self.__class__.__name__\n    if not PY3:\n      self.app_server = unicode(self.app_server)\n\n    # subgradient rpc\n    self.subgradient_rpc = SubgradientRPC(self.subgradientserver['subgradientserver_ip'], self.subgradientserver['subgradientserver_port'])\n    self.mltalker_rpc = MLTalkerRPC(self.server_ip, self.http_port, self.app_token)\n\n    # parse hardware resource config\n    self._running_config = {'GPU_MODEL': '',\n                            'GPU_NUM': 0,\n                            'GPU_MEM': 0,\n                            'CPU_MODEL': '',\n                            'CPU_NUM': 0,\n                            'CPU_MEM': 0,\n                            'OS_PLATFORM': '',\n                            'OS_VERSION': '',\n                            
'SOFTWARE_FRAMEWORK': '',\n 'DATASET': ''}\n\n self._description_config = {'SHORT_DESCRIPTION': '',\n 'LONG_DESCRIPTION': '',\n 'VERSION': '',\n 'INPUT_NUM': 1,\n 'INPUT_TYPE':[]}\n\n if ant_context is not None and ant_context.params is not None and ant_context.params._params is not None:\n config_params = ant_context.params._params\n if 'RUNNING_CONFIG' in config_params:\n if 'GPU_MODEL' in config_params['RUNNING_CONFIG']:\n self._running_config['GPU_MODEL'] = config_params['RUNNING_CONFIG']['GPU_MODEL']\n\n if 'GPU_NUM' in config_params['RUNNING_CONFIG']:\n self._running_config['GPU_NUM'] = config_params['RUNNING_CONFIG']['GPU_NUM']\n\n if 'GPU_MEM' in config_params['RUNNING_CONFIG']:\n self._running_config['GPU_MEM'] = config_params['RUNNING_CONFIG']['GPU_MEM']\n\n if 'CPU_MODEL' in config_params['RUNNING_CONFIG']:\n self._running_config['CPU_MODEL'] = config_params['RUNNING_CONFIG']['CPU_MODEL']\n\n if 'CPU_NUM' in config_params['RUNNING_CONFIG']:\n self._running_config['CPU_NUM'] = config_params['RUNNING_CONFIG']['CPU_NUM']\n\n if 'CPU_MEM' in config_params['RUNNING_CONFIG']:\n self._running_config['CPU_MEM'] = config_params['RUNNING_CONFIG']['CPU_MEM']\n\n if 'OS_PLATFORM' in config_params['RUNNING_CONFIG']:\n self._running_config['OS_PLATFORM'] = config_params['RUNNING_CONFIG']['OS_PLATFORM']\n\n if 'OS_VERSION' in config_params['RUNNING_CONFIG']:\n self._running_config['OS_VERSION'] = config_params['RUNNING_CONFIG']['OS_VERSION']\n\n if 'SOFTWARE_FRAMEWORK' in config_params['RUNNING_CONFIG']:\n self._running_config['SOFTWARE_FRAMEWORK'] = config_params['RUNNING_CONFIG']['SOFTWARE_FRAMEWORK']\n\n if 'DESCRIPTION_CONFIG' in config_params:\n if 'SHORT_DESCRIPTION' in config_params['DESCRIPTION_CONFIG']:\n self._description_config['SHORT_DESCRIPTION'] = config_params['DESCRIPTION_CONFIG']['SHORT_DESCRIPTION']\n\n if 'LONG_DESCRIPTION' in config_params['DESCRIPTION_CONFIG']:\n self._description_config['LONG_DESCRIPTION'] = config_params['DESCRIPTION_CONFIG']['LONG_DESCRIPTION']\n\n if 'VERSION' in config_params['DESCRIPTION_CONFIG']:\n self._description_config['VERSION'] = config_params['DESCRIPTION_CONFIG']['VERSION']\n\n if 'INPUT_NUM' in config_params['DESCRIPTION_CONFIG']:\n self._description_config['INPUT_NUM'] = config_params['DESCRIPTION_CONFIG']['INPUT_NUM']\n\n if 'INPUT_TYPE' in config_params['DESCRIPTION_CONFIG']:\n self._description_config['INPUT_TYPE'] = config_params['DESCRIPTION_CONFIG']['INPUT_TYPE']\n\n self._running_platform = kwargs.get('running_platform', 'local') # local, cloud\n\n # core\n self.ant_context = None\n if ant_context is not None:\n self.ant_context = ant_context\n self.ant_context.ant = self\n\n @property\n def zmq_socket(self):\n return self._zmq_socket\n @zmq_socket.setter\n def zmq_socket(self, val):\n self._zmq_socket = val\n self._zmq_socket.connect(self.app_connect)\n\n @property\n def zmq_file_socket(self):\n return self._zmq_file_socket\n @zmq_file_socket.setter\n def zmq_file_socket(self,val):\n self._zmq_file_socket = val\n self._zmq_file_socket.connect(self.app_file_connect)\n \n @property\n def pid(self):\n return self._pid\n @pid.setter\n def pid(self, val):\n self._pid = val\n\n @property\n def running_config(self):\n return self._running_config\n\n @property\n def description_config(self):\n return self._description_config\n\n @property\n def running_platform(self):\n return self._running_platform\n\n def package_codebase(self, prefix='qiniu', target_path='', signature='123'):\n logger.info('package code envoriment')\n if self.app_token 
is None:\n if not os.path.exists(os.path.join(self.main_folder, FLAGS.task())):\n shutil.copy(os.path.join(Config.task_factory, FLAGS.task()), os.path.join(self.main_folder))\n\n tar_shell = 'tar -czf - * | openssl enc -e -aes256 -out %s.tar.gz -k %s' % (self.name, signature)\n subprocess.call(tar_shell, shell=True, cwd=self.main_folder)\n\n logger.info('finish package')\n if prefix == 'qiniu':\n logger.info('upload codebase package')\n qiniu_address = qiniu_upload(os.path.join(self.main_folder, '%s.tar.gz'%self.name),\n bucket='experiment',\n max_size=100)\n # clear\n os.remove(os.path.join(self.main_folder, '%s.tar.gz' % self.name))\n return qiniu_address\n elif prefix == 'ipfs':\n pass\n elif prefix == 'baidu':\n pass\n elif prefix.startswith('ssh') or prefix.startswith('scp'):\n nodes = prefix.replace('scp:', '')\n node_ip_list = nodes.split(',')\n for ip in node_ip_list:\n if ip=='127.0.0.1' or ip=='localhost':\n continue\n\n logger.info('deploy code at %s'%ip)\n try:\n cmd_str = 'ssh %s %s' % (ip, 'mkdir -p %s'%target_path)\n logger.info('execute %s' % cmd_str)\n subprocess.call(cmd_str, shell=True)\n except:\n pass\n\n try:\n cmd_str = 'scp %s %s:%s' % (os.path.join(self.main_folder, '%s.tar.gz' % self.name), ip, target_path)\n logger.info('execute %s' % cmd_str)\n subprocess.call(cmd_str, shell=True)\n except:\n logger.error('couldnt distribute code base to %s' % ip)\n exit(-1)\n\n # clear\n os.remove(os.path.join(self.main_folder, '%s.tar.gz' % self.name))\n\n return '%s.tar.gz' % self.name\n\n def register_ant(self, codebase_address, running_config, server_config={}):\n request_url = '%s://%s:%d/api/aifactory/register'%(self.http_prefix, self.server_ip, self.http_port)\n\n data_str = json.dumps({'CODE_BASE': codebase_address,\n 'RUNNING_CONFIG': running_config,\n 'SERVER_CONFIG': server_config})\n response = requests.post(request_url, {'DATA': data_str})\n\n if response is None:\n return None\n\n if response.status_code in [200, 201]:\n result = json.loads(response.content)\n return result\n else:\n return None\n\n def submit_ant(self, codebase_address, running_config, server_config={}):\n pass\n\n def send(self, data, stage):\n if self.app_token is not None:\n # now_time = datetime.now().timestamp()\n now_time = timestamp()\n # 0.step add extra data\n data['APP_TOKEN'] = self.app_token\n data['APP_TIME'] = self.time_stamp\n if self.context is not None:\n if self.context.params is not None:\n data['APP_HYPER_PARAMETER'] = json.dumps(self.context.params.content)\n data['APP_RPC'] = \"INFO\"\n data['APP_STAGE'] = stage\n data['APP_NOW_TIME'] = now_time\n data[\"APP_NAME\"] = self.ant_name\n data[\"APP_SERVER\"] = self.app_server\n\n # exclude 'RECORD'\n record_data = None\n if 'RECORD' in data:\n record_data = data['RECORD']\n data.pop('RECORD')\n\n # 1.step send info\n self.zmq_socket.send(dumps(data))\n\n # 2.step ignore any receive info\n response = self.zmq_socket.recv(copy=False)\n response = loads(response)\n if 'status' in response:\n if response['status'] != 'OK':\n logger.error('error in uploading, maybe token isnot valid..')\n if self.app_server not in ['AntTrain','AntChallenge']:\n logger.error('perhaps you are using task token')\n return\n\n # 3.step upload record files\n if record_data is not None and os.path.exists(record_data):\n self.send_record(record_data, stage)\n \n def send_record(self, data, stage):\n if self.app_token is not None:\n # format: token, stage, time_stamp, now_time_stamp, block_id, block_size, max_block_size, block\n # 1.step uuid\n record_id = 
str(uuid.uuid1()) if PY3 else unicode(uuid.uuid1())\n \n # 2.step tar record\n temp_tar_file_path = os.path.join(tempfile.gettempdir(), '%s.tar.gz'%record_id)\n if os.path.exists(temp_tar_file_path):\n os.remove(temp_tar_file_path)\n tar = tarfile.open(temp_tar_file_path, 'w:gz')\n if os.path.isdir(data):\n # folder\n for f in os.listdir(data):\n if os.path.isfile(os.path.join(data, f)):\n tar.add(os.path.join(data, f), arcname=f)\n else:\n # single file\n tar.add(data)\n tar.close()\n \n # 3.step split data pieces\n with open(temp_tar_file_path, 'rb') as fp:\n BLOCK_SIZE = 8 * 1024\n block_data = fp.read(BLOCK_SIZE)\n \n # send data blocks\n while block_data != b\"\":\n self.zmq_file_socket.send(dumps((self.app_token,\n self.ant_name,\n stage,\n self.time_stamp,\n 'EXPERIMENT-RECORD',\n record_id,\n BLOCK_SIZE,\n len(block_data),\n block_data)))\n block_data = fp.read(BLOCK_SIZE)\n \n # send data EOF\n self.zmq_file_socket.send(dumps((self.app_token,\n self.ant_name,\n stage,\n self.time_stamp,\n 'EXPERIMENT-RECORD',\n record_id,\n BLOCK_SIZE,\n 0,\n b'')))\n # waiting until server tells us it's done\n flag = self.zmq_file_socket.recv()\n\n # 4.step clear\n if os.path.exists(temp_tar_file_path):\n os.remove(temp_tar_file_path)\n\n def send_file(self, file_path, name, stage, mode, target_name):\n # 1.step whether file_path exist\n if not os.path.isfile(file_path):\n return False\n\n # 2.step split data pieces\n with open(file_path, 'rb') as fp:\n BLOCK_SIZE = 8 * 1024\n block_data = fp.read(BLOCK_SIZE)\n\n # send data blocks\n while block_data != b\"\":\n self.zmq_file_socket.send(dumps((self.app_token,\n name,\n stage,\n self.time_stamp,\n mode,\n target_name,\n BLOCK_SIZE,\n len(block_data),\n block_data)))\n block_data = fp.read(BLOCK_SIZE)\n\n # send data EOF\n self.zmq_file_socket.send(dumps((self.app_token,\n name,\n stage,\n self.time_stamp,\n mode,\n target_name,\n BLOCK_SIZE,\n 0,\n b'')))\n # waiting until server tells us it's done\n flag = self.zmq_file_socket.recv()\n return True\n\n def rpc(self, cmd=\"\"):\n if self.app_token is not None:\n # 0.step config data\n data = {}\n data['APP_TOKEN'] = self.app_token\n data['APP_TIME'] = self.time_stamp\n data['APP_RPC'] = cmd\n data['APP_STAGE'] = 'RPC'\n data['APP_NOW_TIME'] = timestamp()\n data[\"APP_NAME\"] = self.ant_name\n data['APP_SERVER'] = self.app_server\n\n # 1.step send rpc\n self.zmq_socket.send(dumps(data))\n\n # 2.step receive info\n try:\n response = loads(self.zmq_socket.recv(copy=False))\n if len(response) == 0:\n return None\n return response\n except:\n return None\n\n return None\n\n def download(self, source_path, target_path=None, target_name=None, archive=None):\n if target_path is None:\n target_path = os.curdir\n\n is_that = re.match('^((https|http|ftp|rtsp|mms)?://)', source_path)\n if is_that is not None:\n download(source_path, target_path, fname=target_name)\n\n is_gz = re.match('.*\\.gz', target_name)\n if is_gz is not None:\n if archive is not None:\n extracted_path = os.path.join(target_path, archive)\n else:\n extracted_path = target_path\n\n if not os.path.exists(extracted_path):\n os.makedirs(extracted_path)\n\n tar = tarfile.open(os.path.join(target_path, target_name))\n tar.extractall(extracted_path)\n tar.close()\n target_path = extracted_path\n\n return target_path\n\n def remote_api_request(self, cmd, data=None, action='get'):\n url = '%s://%s:%s/%s'%(self.http_prefix, self.server_ip, self.http_port, cmd)\n user_authorization = {'Authorization': \"token \" + self.app_token}\n try:\n response 
= None\n if action == 'get':\n # get a resource at server\n response = requests.get(url, data=data, headers=user_authorization)\n elif action == 'post':\n # build a resource at server\n response = requests.post(url, data=data, headers=user_authorization)\n elif action == 'patch':\n # update part resource at server\n response = requests.patch(url, data=data, headers=user_authorization)\n elif action == 'delete':\n # delete resource at server\n response = requests.delete(url, data=data, headers=user_authorization)\n\n if response is None:\n return None\n\n if response.status_code != 200 and response.status_code != 201:\n return None\n\n response_js = json.loads(response.content.decode())\n return response_js\n except:\n return None\n\n @property\n def stage(self):\n return self.context.stage\n @stage.setter\n def stage(self, val):\n self.context.stage = val\n\n @property\n def token(self):\n return self.app_token\n @token.setter\n def token(self, val):\n self.app_token = val\n\n @property\n def name(self):\n return self.ant_name\n\n @property\n def context(self):\n return self.ant_context\n\n @context.setter\n def context(self, val):\n self.ant_context = val\n self.ant_context.ant = self\n\n @property\n def proxy(self):\n return self._proxy\n\n @property\n def signature(self):\n return self._signature\n\n @property\n def time_stamp(self):\n return self._time_stamp\n \n def clone(self):\n if self.pid != str(os.getpid()):\n # reset process pid\n self.pid = str(os.getpid())\n \n # update zmq sockets\n # (couldnt share socket in differenet process)\n self.zmq_socket = zmq.Context().socket(zmq.REQ)\n self.zmq_file_socket = zmq.Context().socket(zmq.DEALER)\n \n # update context\n ctx = main_context(self.main_file, self.main_folder)\n if self.main_param is not None:\n main_config_path = os.path.join(self.main_folder, self.main_param)\n params = yaml.load(open(main_config_path, 'r'))\n ctx.params = params\n \n if self.context.from_experiment is not None:\n ctx.from_experiment = self.context.from_experiment\n \n self.context = ctx\n","sub_path":"antgo/ant/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":18954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"3773620","text":"import tensorflow.compat.v1 as tf\nimport numpy as np\n\n\nm = 1740\n\nx_batch = np.random.rand(m)\ny_batch = np.random.rand(1)\n\nweights = np.random.rand(m)\nbiases = np.random.rand(m)\n\nwith tf.Session() as sess:\n\n x = tf.placeholder(tf.float32, shape=(m, ), name='x')\n y = tf.placeholder(tf.float32, shape=(1, ), name='y')\n # w = tf.Variable(np.random.rand(m), name='W', dtype=tf.float32)\n # b = tf.Variable(np.random.rand(m), name='b', dtype=tf.float32)\n w = tf.placeholder(tf.float32, shape=(m, ), name='W')\n b = tf.placeholder(tf.float32, shape=(m, ), name='b')\n\n mu = tf.constant(1, dtype=tf.float32)\n\n _ = tf.Variable(initial_value=np.random.rand(1))\n\n\n h = tf.reduce_sum(tf.multiply(w, x))\n c = tf.multiply(y, h)\n distances = tf.subtract(1., c)\n # maximum = tf.maximum(0., distances)\n #maximum = tf.boolean_mask(distances, tf.greater(0., distances))\n\n # Look here for gradient of SVM objective function: http://u.cs.biu.ac.il/~jkeshet/teaching/aml2016/sgd_optimization.pdf\n maximum = tf.cast(tf.greater(distances, 0.), tf.float32)\n\n g = tf.multiply(maximum, x)\n\n g = tf.multiply(mu, g)\n w = tf.subtract(w, g, name='update')\n\n sess.run(tf.initialize_all_variables())\n feed_dict = {x: x_batch, y: y_batch, w: weights, b: biases}\n 
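# note: a full hinge-loss subgradient step would also include the label factor,\n    # w <- w + mu * 1[1 - y*<w,x> > 0] * y * x; the update graph above omits y\n    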
sess.run(w, feed_dict)\n tf.train.Saver().save(sess, 'model.ckpt')\n","sub_path":"tabla/tabla/benchmarks/onnx/svm_tf.py","file_name":"svm_tf.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"118914006","text":"a=[]\r\nk=int(input(\"Enter number of elements: \")) \r\nfor i in range(k):\r\n b=input(\"Enter element: \")\r\n a.append(b)\r\nc=[]\r\nfor b in a:\r\n if a.count(b)==1:\r\n c.append(b)\r\nprint(\"Non-repeated numbers: \",c)\r\ninput()\r\n","sub_path":"km73/Hirianska_Viktoriia/5/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"426718805","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\nfrom tqdm.notebook import tqdm\nfrom datetime import datetime\n\nfrom transformer.transformer import (\n\tTransformer, create_pad_mask, create_look_ahead_mask\n)\n\n\n__SOS_IMAGE_TOKEN__ = 64\n__EOS_IMAGE_TOKEN__ = 65\n__MASK_IMAGE_TOKEN__ = 66\n\n__SOS_TEXT_TOKEN__ = 67\n__EOS_TEXT_TOKEN__ = 68\n__PAD_TEXT_TOKEN__ = 69\n\n\ndef get_latest_snapshot_name(path):\n \"\"\"\n A function to get the name of a latest snapshot file\n \"\"\"\n\n if not os.path.isabs(path): path = os.path.join(os.getcwd(), path)\n snapshots = [os.path.join(path, s) for s in os.listdir(path)]\n\n if not snapshots: raise RuntimeError('No snapshots found')\n latest_snapshot = max(snapshots, key=os.path.getctime)\n \n return latest_snapshot\n\n\nclass Trainer():\n\n def __init__(\n self,\n model,\n optimizer,\n device, \n train_dataset,\n val_dataset=None,\n gradient_clipping=None,\n snapshot_path=None \n ):\n\n \"\"\" \n :param model: a transformer model to train\n :type model : torch.nn.Module\n\n :type train_dataset: Text2ImageDataset\n :type val_dataset : Text2ImageDataset\n\n \"\"\"\n\n default_optimizer_params = {'lr': 1e-4}\n\n self.model = model\n self.device = device\n\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n \n self.optimizer = optimizer\n \n self.gradient_clipping = gradient_clipping\n self.snapshot_path = snapshot_path\n # internal snapshot parameters\n self.date_format = '%Y-%m-%d_%H-%M-%S'\n\n\n def load_latest_snapshot(self):\n\n sname = get_latest_snapshot_name(self.snapshot_path)\n snapshot = torch.load(sname)\n\n error_msg_header = f'Error loading snapshot {sname}' +\\\n '- incompatible snapshot format. 
'\n        if 'optimizer' not in snapshot:\n            raise KeyError(error_msg_header + 'Key \"optimizer\" is missing')\n        if 'model' not in snapshot:\n            raise KeyError(error_msg_header + 'Key \"model\" is missing')\n\n        self.model.load_state_dict(snapshot['model'])\n        self.optimizer.load_state_dict(snapshot['optimizer'])\n\n\n    def save_model(self, replace_latest=False):\n\n        if self.snapshot_path is None: return\n        \n        time_string = datetime.now().strftime(self.date_format)\n\n        states = {\n            'model'    : self.model.state_dict(),\n            'optimizer': self.optimizer.state_dict()\n        }\n\n        if not replace_latest:\n            torch.save(states, os.path.join(self.snapshot_path, time_string + '.pth'))\n        else:\n            try:\n                os.remove(get_latest_snapshot_name(self.snapshot_path))\n            except Exception:\n                pass\n            torch.save(states, os.path.join(self.snapshot_path, time_string + '.pth'))\n\n\n    def train(self, n_epochs=100, batch_size=32, save_interval=1000, from_zero=True, plot_loss_history=True):\n\n        MAX_TEXT_LEN = self.train_dataset.max_text_length\n        # per-word weights (currently unused by the BCE criterion below)\n        weight = torch.ones(self.train_dataset.annotations_language.n_words).to(self.device)\n\n        self.model = self.model.to(self.device)\n\n\n        criterion = nn.BCELoss()\n        batch_index = 0\n\n        if not from_zero: self.load_latest_snapshot()\n\n        batch_generator = torch.utils.data.DataLoader(\n            self.train_dataset, batch_size=batch_size, shuffle=True, num_workers=1\n        )\n\n        loss_history = []\n\n\n        for i in tqdm(range(n_epochs), desc='Training'):\n\n            self.model.train(True)\n\n            loss_epoch = []\n\n            for j, b in enumerate(tqdm(batch_generator, desc=f'Epoch {i+1} of {n_epochs}')):\n\n                self.optimizer.zero_grad()\n                self.model.zero_grad()\n\n                in_ = b.to(self.device)\n\n                mask = create_look_ahead_mask(in_)\n\n                target = F.one_hot(in_.clone().detach()[:, 1:], num_classes=1186).float()\n                out = F.softmax(self.model(in_, mask), dim=-1)\n\n
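                # note: text positions (the first MAX_TEXT_LEN tokens) carry 1/8 of the loss and\n                # image positions the remaining 7/8, weighting image reconstruction more heavily\n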
                loss_value = 1. / 8. * criterion(out[:, :MAX_TEXT_LEN], target[:, :MAX_TEXT_LEN]) +\\\n                             7. / 8. * criterion(out[:, MAX_TEXT_LEN:-1], target[:, MAX_TEXT_LEN:])\n\n                loss_value.backward()\n                self.optimizer.step()\n\n                batch_index += 1\n\n                if batch_index % save_interval == 0: self.save_model()\n\n                loss_epoch.append(loss_value.item())\n\n\n            loss_history.append(np.mean(np.array(loss_epoch)))\n\n            if plot_loss_history:\n                plt.figure(figsize=(8, 8))\n                plt.plot(loss_history, label='loss')\n                plt.legend()\n                plt.show()\n\n        self.model = self.model.cpu()\n\n        return loss_history\n\n\n\n","sub_path":"transformer/transformer_trainer.py","file_name":"transformer_trainer.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"406231097","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 16 12:18:34 2018\n\n@author: yamini\n\n\"\"\"\nclass Solution:\n    def isPowerOfThree(self, n):\n        if n == 1:\n            return True\n        elif n <= 0:\n            return False\n        else:\n            while n > 1:\n                if n % 3 == 0:\n                    n = n // 3  # integer division keeps n an int\n                    continue\n                else:\n                    return False\n            return True","sub_path":"IsPowerOfThree.py","file_name":"IsPowerOfThree.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"495968720","text":"import unittest\nimport cannibal_numbers as cannibal\n\n\nclass TestCannibal(unittest.TestCase):\n    def test_target_ten(self):\n        test_input = [21, 9, 5, 8, 10, 1, 3]\n        self.assertEqual(cannibal.cannibalise(test_input, 10), 4)\n\n    def test_target_fifteen(self):\n        test_input = [21, 9, 5, 8, 10, 1, 3]\n        self.assertEqual(cannibal.cannibalise(test_input, 15), 2)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"Python/336E/cannibal_numbers_test.py","file_name":"cannibal_numbers_test.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"432267248","text":"from flask import json, request, Blueprint, jsonify\nfrom evodoc.exception import DbException, ApiException\nfrom evodoc.entity import *\nfrom evodoc.api import response_ok, response_ok_list, response_ok_obj, validate_token\n\nproject = Blueprint('project', __name__, url_prefix='/project')\n\n@project.route('',methods=['GET'])\ndef get_project_by_id_action():\n    \"\"\"\n    Get project data by its id\n    \"\"\"\n    token = request.args.get('token')\n    id = request.args.get('id')\n    validate_token(token)\n    #check permissions in the future\n    data = Project.get_project_by_id(id)\n    return response_ok_obj(data)\n\n@project.route('/name', methods=['GET'])\ndef get_project_by_name_action():\n    \"\"\"\n    Get project data by its name (taken from the query string)\n    \"\"\"\n    token = request.args.get('token')\n    name = request.args.get('name')\n    validate_token(token)\n    #check permissions in the future\n    data = Project.get_project_by_name(name)\n    if (data == None):\n        raise ApiException(404, \"Project not found\")\n    return response_ok_obj(data)\n\n@project.route('/all', methods=['GET'])\ndef get_project_all_action():\n    \"\"\"\n    Get data for all projects\n    \"\"\"\n    token = request.args.get('token')\n    validate_token(token)\n    #check permissions in the future\n    data = Project.get_project_all()\n    return response_ok_list(data)\n\n@project.route(\"/update_or_create\", methods=['POST'])\ndef update_or_create_poject_action():\n    \"\"\"\n    Update or create a project\n    \"\"\"\n    data = request.get_json()\n    if data == None:\n        raise ApiException(400, \"data\")\n    if (data['token'] == None):\n        raise ApiException(403, \"Invalid token\")\n    
if (('poject_id' not in data) or (data['poject_id'] == None)):\n        poject_id = None\n    else:\n        poject_id = data['poject_id']\n    validate_token(data['token'])\n    #check permissions in the future\n    data = Project.create_or_update_project_by_id_array(poject_id, data['data'], True)\n    if (data == None):\n        raise ApiException(400, \"Name already in use.\")\n    return response_ok_obj(data)\n\n@project.errorhandler(ApiException)\n@project.errorhandler(DbException)\ndef __response_err(data):\n    return jsonify(data.message), data.errorCode\n","sub_path":"evodoc/api/projectapi.py","file_name":"projectapi.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"110250182","text":"\"\"\"\n    Given an array of time intervals (start, end) for classroom lectures (possibly overlapping),\n    find the minimum number of rooms required.\n\n    For example, given [(30, 75), (0, 50), (60, 150)], you should return 2.\n\"\"\"\n\nclass Room:\n    def __init__(self, interval=None):\n        self.intervals = []\n        if interval:\n            self.intervals.append(interval)\n    def occupied(self, interval):\n        start = interval[0]\n        end = interval[1]\n        for i in self.intervals:\n            # two intervals overlap iff each one starts before the other ends;\n            # this also covers the case where one interval fully contains the other\n            if start <= i[1] and end >= i[0]:\n                return True\n        return False\n\ndef minimum_rooms(intervals):\n    rooms = []\n    if len(intervals) > 0:\n        rooms.append(Room())\n    for interval in intervals:\n        i = 0\n        ok = False\n        while (not ok and i < len(rooms)):\n            if not rooms[i].occupied(interval):\n                rooms[i].intervals.append(interval)\n                ok = True\n            i += 1\n        if not ok:\n            rooms.append(Room(interval))\n    return len(rooms)\n\n\n\nprint(minimum_rooms([(30, 75), (0, 50), (60, 150)]))\nprint(minimum_rooms([(30, 75), (0, 530), (60, 150),(30, 75), (10, 50), (233, 150),(30, 735), (10, 530), (3, 54),(30, 75), (0, 530), (60, 150),(30, 75), (10, 50), (233, 150),(30, 735), (10, 530), (3, 54),(30, 75), (0, 530), (60, 150),(30, 75), (10, 50), (233, 150),(30, 735), (10, 530), (3, 54),(30, 75), (0, 530), (60, 150),(30, 75), (10, 50), (233, 150),(30, 735), (10, 530), (3, 54)]))\n","sub_path":"DailyProblem/problem21.py","file_name":"problem21.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"307671704","text":"import scrapy\nfrom maoyan.items import MaoyanItem\nfrom scrapy.selector import Selector\n\n\nclass MaoyanSpider(scrapy.Spider):\n    # spider name\n    name = 'maoyan_movies'\n    allowed_domains = ['maoyan.com']\n    # initial URL list\n    start_urls = ['https://maoyan.com/films?showType=3']\n\n    # def parse(self, response):\n    #     pass\n\n    # When the spider starts, the engine calls this method automatically, exactly once, to build\n    # the initial Request objects. start_requests() reads the URLs in start_urls, creates Request\n    # objects and hands them to the engine, which directs the other components to download the pages.\n    # Customize the requested URLs:\n    # def start_requests(self):\n    #     for i in range(0, 10):\n    #         url = f'https://movie.douban.com/top250?start={i*25}'\n    #         yield scrapy.Request(url=url, callback=self.parse)\n    #         # url: the address to request\n    #         # callback: the engine passes the downloaded page (a Response object) to this method\n    #         # for parsing; a custom function can be given here instead of the default parse\n\n    # parsing callback\n    def parse(self, response):\n        # print(response.text)\n        print(response.url)\n        movie_selector_generator = (movie for movie in Selector(\n            response=response).xpath('//dl[@class=\"movie-list\"]').xpath('//dd'))\n        for i in range(10):\n            item = MaoyanItem()\n            movie = next(movie_selector_generator)\n            film_title = movie.xpath('./div[2]/a/text()').extract()\n            item['film_title'] = film_title\n            print(film_title)\n
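            # note: extract()[-1] below assumes the text node exists; a movie card missing the\n            # field would raise an IndexError here\n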
            movie_type = movie.xpath(\n                './div[1]/div[2]/a/div/div[2]/text()').extract()[-1].strip()\n            item['movie_type'] = movie_type\n            print(movie_type)\n            plan_date = movie.xpath(\n                './div[1]/div[2]/a/div/div[4]/text()').extract()[-1].strip()\n            item['plan_date'] = plan_date\n            print(plan_date)\n            yield item\n","sub_path":"week01/task02/spiders/maoyan/spiders/maoyan_movies.py","file_name":"maoyan_movies.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"208925204","text":"\"\"\" slit head motor control\n\"\"\"\nfrom __future__ import division, absolute_import\nimport os\n\nfrom twisted.internet.protocol import Protocol, ClientFactory\nfrom twisted.internet.endpoints import TCP4ClientEndpoint\nfrom twisted.internet import reactor\nimport numpy\n# from twisted.internet.defer import Deferred\n\n#@todo, implement timeouts\nclass MotorConfig(object):\n    def __init__(self):\n        self.configFile = os.path.join(os.getenv(\"PYMAPPER_DIR\"), \"etc\", \"motorConfig.dat\")\n        self.hostname = None\n        self.port = None\n        self.startPos = None\n        self.endPos = None\n        self.speed = None\n        self.slitPos = None\n        self.direction = None\n        self.loadMe() # load from file and set attrs\n        self.checkMe()\n\n\n    def loadMe(self):\n        slitPos = {}\n        slitPopulate = False\n        with open(self.configFile, \"r\") as f:\n            lines = f.readlines()\n            for line in lines:\n                line = line.strip()\n                if not line:\n                    continue\n                if line.startswith(\"#\"):\n                    continue\n                line = line.lower()\n                if line.startswith(\"hostname\"):\n                    self.hostname = str(self.getlineValue(line))\n                elif line.startswith(\"port\"):\n                    self.port = int(self.getlineValue(line))\n                elif line.startswith(\"startpos\"):\n                    self.startPos = float(self.getlineValue(line))\n                elif line.startswith(\"endpos\"):\n                    self.endPos = float(self.getlineValue(line))\n                elif line.startswith(\"speed\"):\n                    self.speed = float(self.getlineValue(line))\n                elif line.startswith(\"slitpos\"):\n                    # begin populating dict\n                    slitPopulate = True\n                elif line.startswith(\"}\"):\n                    # done populating dict\n                    slitPopulate = False\n                    self.slitPos = slitPos\n                elif slitPopulate:\n                    line = line.strip(\",\")\n                    fiber, motorPos = line.split(\":\")\n                    fiber = int(fiber)\n                    motorPos = float(motorPos)\n                    slitPos[fiber] = motorPos\n        self.direction = numpy.sign(self.endPos-self.startPos)\n\n    def getlineValue(self, line):\n        return line.split(\"=\")[-1].strip()\n\n    def checkMe(self):\n        if None in [\n            self.hostname,\n            self.port,\n            self.startPos,\n            self.endPos,\n            self.speed,\n            self.direction\n            ]:\n            raise RuntimeError(\"Some motor configuration is missing\")\n        # check that all 300 fibers are present in slit pos\n        if not numpy.array_equal(sorted(self.slitPos.keys()), list(range(1, 301))):\n            raise RuntimeError(\"Missing motor positions in slit pos config\")\n\n    def posFromTime(self, timestamp):\n        \"\"\"Return motor position for a given time\n        \"\"\"\n        return self.startPos + self.direction*self.speed*timestamp\n\nMOTOR_CONFIG = MotorConfig()\n\nclass Command(object):\n    def __init__(self, cmdStr, callFunc=None, timeout=0):\n        self.cmdStr = cmdStr\n        self.callFuncs = []\n        if callFunc is not None:\n            self.callFuncs.append(callFunc)\n        self.isDone = False\n\n    def setDone(self):\n        if self.isDone:\n            raise RuntimeError(\"cannot set command %s done, already done!\"%self.cmdStr)\n        print(\"setting %s done!\"%self.cmdStr)\n        self.isDone = True\n        for func in self.callFuncs:\n            func()\n\n    def addCallback(self, callFunc):\n        self.callFuncs.append(callFunc)\n\nclass MotorProtocol(Protocol):\n\n    def __init__(self, motorControllerInstance):\n        
self.mci = motorControllerInstance\n\n def dataReceived(self, data):\n \"\"\"Called each time a line of data is received from the ASCII controller\n \"\"\"\n self.mci.dataReceived(data)\n\n # def sendCommand(self, cmdStr):\n # \"\"\"Sent ascii text to the ASCII controller\n # \"\"\"\n # self.transport.write(\"%s\\n\" % cmdStr)\n\n def connectionMade(self):\n \"\"\"Called when a connection is made\n \"\"\"\n print(\"connection made\")\n\nclass MotorClientFactory(ClientFactory):\n def __init__(self, motorControllerInstance):\n self.mci = motorControllerInstance\n\n def startedConnecting(self, connector):\n print(\"Started to connect to motor.\")\n\n def buildProtocol(self, addr):\n print(\"Connected to motor.\")\n return MotorProtocol(self.mci)\n\n # def clientConnectionLost(self, connector, reason):\n # print(\"Lost connection to motor. Reason:\", reason)\n\n # def clientConnectionFailed(self, connector, reason):\n # print(\"Connection failed!\")\n #raise RuntimeError(\"Connection to motor failed. Reason:%s\"%reason)\n\n# class MotorStatus(object):\n# def __init__(self):\n# self.speed = None\n# self.currentPosition = None\n# self.targetPosition = None\n# self.isHomed = None\n# self.laserOn = None\n\nclass MotorController(object):\n def __init__(self, readyCallback=None):\n \"\"\"readyCallback called when MotorController is ready to scan!\n \"\"\"\n self.readyCallback = readyCallback\n # self.status = MotorStatus()\n self.mcf = MotorClientFactory(self)\n self.protocol = None # will be set after connection made\n self.currCmd = Command(cmdStr=\"dummy\")\n self.currCmd.setDone()\n self.commandQueue = []\n self.isHomed = False\n\n def addReadyCallback(self, readyCallback):\n self.readyCallback = readyCallback\n\n def connect(self):\n \"\"\"Returns a deferred\n \"\"\"\n point = TCP4ClientEndpoint(reactor, MOTOR_CONFIG.hostname, MOTOR_CONFIG.port)\n connDeferred = point.connect(self.mcf)\n connDeferred.addCallback(self.gotProtocol)\n # and then prepare the controller to scan!\n connDeferred.addCallback(self.prepareToScan)\n # if the connection failed, let us know\n connDeferred.addErrback(self.connFailed)\n\n def disconnect(self):\n print(\"disconnecting from ASCII server\")\n return self.protocol.transport.loseConnection()\n print(\"killing twisted event loop\")\n reactor.stop()\n\n def connFailed(self, failure):\n print(\"conn failed errback\")\n print(str(failure))\n reactor.stop()\n # raise RuntimeError(\"conn failed\", str(failure))\n\n def gotProtocol(self, protocol):\n self.protocol = protocol\n\n def prepareToScan(self, foo):\n print(\"preparing for scan\")\n # foo is ignored arg passed via callback framework\n # could send a stop first...\n self.getStatus(callFunc=self.checkHomeThenMove)\n\n def scan(self, callFunc=None):\n print(\"beginning scan\")\n self.move(MOTOR_CONFIG.endPos)\n self.laserOff(callFunc=callFunc)\n # send motor back to start position\n self.resetAfterScan()\n\n def resetAfterScan(self):\n print(\"resetAfterScan\")\n # foo is ignored arg passed via callback framework\n # could send a stop first...\n # self.getStatus(callFunc=self.checkHomeThenMove)\n self.move(MOTOR_CONFIG.startPos, callFunc=self.disconnect)\n # try killin twisted event loop now?\n # self.protocol.transport.loseConnection()\n # reactor.stop()\n\n\n def checkHomeThenMove(self):\n if not self.isHomed:\n print(\"Slit Head Axis is not homed. Home it before proceeding!\")\n raise RuntimeError(\"Slit Head Axis is not homed. 
Home it before proceeding!\")\n reactor.stop()\n # raise RuntimeError(\"Slit Head Axis is not homed. Home it before proceeding!\")\n else:\n print(\"Axis is Homed!!\")\n # move motor in position for scan.\n self.setSpeed(MOTOR_CONFIG.speed)\n self.move(MOTOR_CONFIG.startPos)\n self.laserOn(callFunc=self.readyCallback)\n\n def getStatus(self, callFunc=None):\n print(\"getStatus\")\n return self.queueCommand(\"status\", callFunc=callFunc)\n\n def setSpeed(self, value, callFunc=None):\n print(\"set speed to %.2f\"%float(value))\n return self.queueCommand(\"speed %.2f\"%float(value), callFunc=callFunc)\n\n def move(self, value, callFunc=None):\n print(\"move to %.2f\"%float(value))\n return self.queueCommand(\"move %.2f\"%float(value), callFunc=callFunc)\n\n def laserOn(self, callFunc=None):\n print(\"laser on\")\n return self.queueCommand(\"lonn\", callFunc=callFunc)\n\n def laserOff(self, callFunc=None):\n print(\"laser off\")\n return self.queueCommand(\"loff\", callFunc=callFunc)\n\n def dataReceived(self, data):\n if self.currCmd is None:\n print(\"unsolicited dataReceived: %s\"%str(data))\n return # don't do anything with unsolicited output...\n for dataline in data.split(\"\\n\"):\n dataline = dataline.strip().lower()\n if not dataline:\n # ignore blank strings...\n continue\n print(\"laser output:\", dataline)\n # right now I only care if the axis is homed\n # don't care about managing any other status bits,\n # however add a parser here to keep track of things\n # eg if status needs to be checked frequently...\n # data_lowered = data.lower()\n if \"homed\" in dataline:\n if \"not_homed\" in dataline:\n self.isHomed = False\n else:\n self.isHomed = True\n if dataline.endswith(\"ok\"):\n # running command is done\n self.currCmd.setDone()\n\n def sendCommand(self, command):\n if not self.currCmd.isDone:\n raise RuntimeError(\"cannot send %s, currently busy with %s\"%(command.cmdStr, self.currCmd.cmdStr))\n self.currCmd = command\n print(\"sending: \", command.cmdStr)\n self.protocol.transport.write(command.cmdStr)\n\n def queueCommand(self, cmdStr, callFunc=None):\n print(\"queueCommand\", cmdStr)\n command = Command(cmdStr, callFunc=callFunc)\n command.addCallback(self.runQueue)\n self.commandQueue.append(command)\n self.runQueue()\n\n def runQueue(self):\n if not self.currCmd.isDone:\n # do nothing, command already executing\n return\n if self.commandQueue:\n # at least one command waiting to execute\n self.sendCommand(self.commandQueue.pop(0))\n\n\n\n\nif __name__ == \"__main__\":\n mc = None\n def cleanup():\n global mc\n print(\"Cleaning up\")\n mc.resetAfterScan()\n def imready():\n global mc\n print(\"I'm READY!!!!\")\n mc.scan(cleanup)\n mc = MotorController(imready)\n # reactor.callLater(mc.resetAfterScan)\n reactor.run()\n\n\"\"\"\nstatus example:\n\nSLIT_HEAD_AXIS:\n__MOVE_ACTUAL_POSITION 0.0\n__TARGET_POSITION 12.0000000\n__DRIVE_STATUS: OFF\n__MOTOR_CURRENT: 0.0\n__DRIVE_SPEED_SP 0.89999998\n__DRIVE_SPEED 0.89999998\n__DRIVE_ACCEL 20\n__DRIVE_DECEL 20\n__MOVE_RANGE 0.0 - 155.000000\n__HARDWARE_FAULT 0\n__INSTRUCTION_FAULT 0\n__HOMED\nVERTICAL_AXIS:\n__MOVE_ACTUAL_POSITION 13.1517000\n__TARGET_POSITION 13.1999998\n__DRIVE_STATUS: OFF\n__MOTOR_CURRENT: 0.0\n__DRIVE_SPEED_SP 50.0000000\n__DRIVE_SPEED 50.0000000\n__DRIVE_ACCEL 20\n__DRIVE_DECEL 20\n__MOVE_RANGE 0.0 - 950.000000\n__HARDWARE_FAULT 0\n__INSTRUCTION_FAULT 0\nFOOT_SWITCH: OFF\nLASER: OFF\n\n\nnot homed:\n\nstatus\nSTATUS\n\nSLIT_HEAD_AXIS:\n__MOVE_ACTUAL_POSITION -0.01890000\n__TARGET_POSITION 
12.0000000\n__DRIVE_STATUS: OFF\n__MOTOR_CURRENT: 0.0\n__DRIVE_SPEED_SP 1.00000000\n__DRIVE_SPEED 1.00000000\n__DRIVE_ACCEL 20\n__DRIVE_DECEL 20\n__MOVE_RANGE 0.0 - 155.000000\n__HARDWARE_FAULT 0\n__INSTRUCTION_FAULT 0\n__NOT_HOMED\nVERTICAL_AXIS:\n__MOVE_ACTUAL_POSITION 13.1517000\n__TARGET_POSITION 13.1999998\n__DRIVE_STATUS: OFF\n__MOTOR_CURRENT: 0.0\n__DRIVE_SPEED_SP 50.0000000\n__DRIVE_SPEED 50.0000000\n__DRIVE_ACCEL 20\n__DRIVE_DECEL 20\n__MOVE_RANGE 0.0 - 950.000000\n__HARDWARE_FAULT 0\n__INSTRUCTION_FAULT 0\nFOOT_SWITCH: OFF\nLASER: OFF\n\nOK\n\n\nhome\nHOME\n\n__SPEED: 1.00000000\n__HOME_ACTUAL_POSITION 9.99999975e-05\nOK\nmove 10\nMOVE 10\n\n__SPEED: 1.00000000\n__MOVE_ACTUAL_POSITION 1.15330005\n__MOVE_ACTUAL_POSITION 2.35339999\n__MOVE_ACTUAL_POSITION 3.55539989\n__MOVE_ACTUAL_POSITION 4.75740004\n__MOVE_ACTUAL_POSITION 5.95730019\n__MOVE_ACTUAL_POSITION 7.15939999\n__MOVE_ACTUAL_POSITION 8.35939980\n__MOVE_ACTUAL_POSITION 9.56140041\n__MOVE_ACTUAL_POSITION 10.0000000\nOK\nhome\nHOME\n\n__SPEED: 1.00000000\n__HOME_ACTUAL_POSITION 9.12380028\n__HOME_ACTUAL_POSITION 8.22379971\n__HOME_ACTUAL_POSITION 7.32229996\n__HOME_ACTUAL_POSITION 6.42070007\n__HOME_ACTUAL_POSITION 5.52069998\n__HOME_ACTUAL_POSITION 4.61920023\n__HOME_ACTUAL_POSITION 3.71919990\n__HOME_ACTUAL_POSITION 2.81769991\n__HOME_ACTUAL_POSITION 1.91770005\n__HOME_ACTUAL_POSITION 1.01619995\n__HOME_ACTUAL_POSITION 0.11480000\n__HOME_ACTUAL_POSITION 0.0\nOK\n\n\nmove then stop\n\nMOVE 10\n\n__SPEED: 1.00000000\n__MOVE_ACTUAL_POSITION 1.15530002\nstop__MOVE_ACTUAL_POSITION 2.35739994\n\nSTOP\n\n\nOK\n\n\nERROR INVALID COMMAND\n\n\n\nstatus while move\n\nMOVE 10\n\n__SPEED: 1.00000000\nstatus\nSTATUS\n\nERROR BUSY MOVING\n__MOVE_ACTUAL_POSITION 1.15540004\n__MOVE_ACTUAL_POSITION 2.35549998\n__MOVE_ACTUAL_POSITION 3.55749989\n__MOVE_ACTUAL_POSITION 4.75950003\n__MOVE_ACTUAL_POSITION 5.95959997\n__MOVE_ACTUAL_POSITION 7.15950012\n__MOVE_ACTUAL_POSITION 8.36159992\n__MOVE_ACTUAL_POSITION 9.56350040\n__MOVE_ACTUAL_POSITION 10.0000000\nOK\n\n\nstatus while home\n\nhome\nHOME\n\n__SPEED: 1.00000000\nstatus\\\n__HOME_ACTUAL_POSITION 9.12250042\nSTATUS\\\n\nERROR BUSY HOMING\n__HOME_ACTUAL_POSITION 8.22239971\n__HOME_ACTUAL_POSITION 7.32240009\n__HOME_ACTUAL_POSITION 6.42100000\n__HOME_ACTUAL_POSITION 5.52099991\n__HOME_ACTUAL_POSITION 4.61940002\n__HOME_ACTUAL_POSITION 3.71790004\n__HOME_ACTUAL_POSITION 2.81640005\n__HOME_ACTUAL_POSITION 1.91649997\n__HOME_ACTUAL_POSITION 1.01489997\n__HOME_ACTUAL_POSITION 0.11340000\n__HOME_ACTUAL_POSITION 0.0\nOK\n\n\"\"\"\n","sub_path":"python/pymapper/motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":13918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"327934036","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom tqdm import tqdm\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nfrom torch.utils.data.sampler import Sampler\r\nfrom VGG import VGG\r\n\r\n\r\nclass ChunkSampler(Sampler):\r\n \"\"\"Samples elements sequentially from some offset.\r\n Arguments:\r\n num_samples: # of desired datapoints\r\n start: offset where we should start selecting from\r\n \"\"\"\r\n def __init__(self, num_samples, start = 0):\r\n self.num_samples = num_samples\r\n self.start = start\r\n\r\n def __iter__(self):\r\n return iter(range(self.start, self.start + self.num_samples))\r\n\r\n def __len__(self):\r\n return self.num_samples\r\n\r\n\r\nNUM_TRAIN = 49000\r\nNUM_VAL = 
1000\r\n\r\ntransform_train = transforms.Compose([\r\n    transforms.RandomCrop(32, padding=4),\r\n    transforms.RandomHorizontalFlip(),\r\n    transforms.ToTensor(),\r\n    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\r\n])\r\n\r\ntransform_test = transforms.Compose([\r\n    transforms.ToTensor(),\r\n    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\r\n])\r\n\r\n\r\ndef train(from_epoch, to_epoch, learning_rate):\r\n    criterion = nn.CrossEntropyLoss()\r\n    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=5e-4)\r\n    for epoch in range(from_epoch, to_epoch):  # loop over the dataset multiple times\r\n        running_loss = 0.0\r\n        for i, data in enumerate(trainloader, 0):\r\n            # get the inputs\r\n            inputs, labels = data\r\n            inputs, labels = inputs.to(device), labels.to(device)\r\n\r\n            # zero the parameter gradients\r\n            optimizer.zero_grad()\r\n\r\n            # forward + backward + optimize\r\n            outputs = net(inputs)\r\n            loss = criterion(outputs, labels)\r\n            loss.backward()\r\n            optimizer.step()\r\n\r\n            # print statistics\r\n            running_loss += loss.item()\r\n            if i % 10 == 9:    # print every 10 mini-batches\r\n                print('[%d, %5d] loss: %.3f' %\r\n                      (epoch + 1, i + 1, running_loss / 10))\r\n                correct = 0\r\n                total = 0\r\n                loss_val = 0\r\n                with torch.no_grad():\r\n                    for data in validloader:\r\n                        images, labels = data\r\n                        images, labels = images.to(device), labels.to(device)\r\n                        outputs = net(images)\r\n                        l = criterion(outputs, labels)\r\n                        loss_val += l.item()\r\n                        _, predicted = torch.max(outputs.data, 1)\r\n                        total += labels.size(0)\r\n                        correct += (predicted == labels).sum().item()\r\n                print('Validation accuracy: %d %%' % (100 * correct / total))\r\n                print('Validation loss: %.3f' % loss_val)\r\n                running_loss = 0.0\r\n\r\n    print('Finished Training')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\r\n                                            download=True, transform=transform_train)\r\n    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, sampler=ChunkSampler(NUM_TRAIN, 0),\r\n                                              num_workers=2)\r\n\r\n    validset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,\r\n                                            transform=transform_test)\r\n    validloader = torch.utils.data.DataLoader(validset, batch_size=128, sampler=ChunkSampler(NUM_VAL, NUM_TRAIN),\r\n                                              num_workers=2)\r\n\r\n    testset = torchvision.datasets.CIFAR10(root='./data', train=False,\r\n                                           download=True, transform=transform_test)\r\n    testloader = torch.utils.data.DataLoader(testset, batch_size=100,\r\n                                             shuffle=False, num_workers=2)\r\n\r\n    classes = ('plane', 'car', 'bird', 'cat',\r\n               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\r\n\r\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n    print(device)\r\n    net = VGG('VGG16')\r\n    net.to(device)\r\n    train(0, 150, 0.01)\r\n\r\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"556079045","text":"#! 
/usr/bin/env python\n#coding=utf-8\n\n'''\nData structure used by the UI navigation tree.\n\n\nCreated on 2011-11-2\n\n@author: kency\n\n\n'''\n\nclass TreeNode(object):\n    '''\n    A node of the tree data structure.\n    id       node id, string\n    name     display name of the node, string\n    children collection of child nodes, TreeNode\n    data     the data carried by this node, object\n    parent   parent node, TreeNode\n    '''\n    def __init__(self):\n        self.id = None\n        self.name = None\n        self.children = []\n        self.data = None\n        self.parent = None\n\n    def append(self, treeNode):\n        self.children.append(treeNode)\n        treeNode.parent = self\n\n    def __repr__(self):\n        if isinstance(self.name,unicode):\n            name = self.name.encode('utf-8')\n        else:\n            name = str(self.name)\n        return \"TreeNode(name=%r, id=%r, data=%r)\" % (\n            str(name),\n            self.id,\n            self.data\n        )\n    def dump(self, _indent=0):\n\n        return \" \" * _indent + repr(self) + \\\n            \"\\n\" + \\\n            \"\".join([\n                c.dump(_indent + 1)\n                for c in self.children]\n            )\n","sub_path":"service/com/zctt/iaap/paf/core/treenode.py","file_name":"treenode.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"612730905","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 12 10:39:26 2021\n\n@author: stefan\n\"\"\"\n# This script uses the following character frequency list by Jun DA\n# Combined character frequency list of Classical and Modern Chinese\n# Obtained from: https://lingua.mtsu.edu/chinese-computing/statistics/\n\nfrom icrawler.builtin import GoogleImageCrawler\nimport pandas as pd\ndata=pd.read_csv(\"t_charfreq.csv\")\n\nfor hanzi in data['漢字']:\n    google_crawler = GoogleImageCrawler(\n        feeder_threads=1,\n        parser_threads=1,\n        downloader_threads=4,\n        storage={'root_dir': 't_img/'+hanzi})\n\n    google_crawler.crawl(keyword=hanzi+'書法', offset=0, max_num=4,\n                         min_size=None, max_size=(400,400), file_idx_offset=0)\n","sub_path":"traditional_chichaana.py","file_name":"traditional_chichaana.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"468205869","text":"#-*- coding:utf-8 -*-\n__author__ = 'TAOQIN001'\nimport pymysql\n\nclass GetMysqlDate(object):\n    def __init__(self,host,user,pwd,db):\n        self.host = host\n        self.user = user\n        self.pwd = pwd\n        self.db = db\n\n    def _get_connect(self):\n        if not self.db:\n            raise NameError(\"there is no mysqldbname\")\n        self.conn = pymysql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset='utf8')\n        cur = self.conn.cursor()\n        if not cur:\n            raise NameError(\"fail to connect db\")\n        else:\n            return cur\n\n    # execute a query statement\n    def exec_query(self,sql):\n        cur = self._get_connect()\n        cur.execute(sql.encode(\"utf-8\"))\n        resList = cur.fetchall()\n        self.conn.close()\n        return resList\n\n    # execute a non-query statement\n    def exec_not_query(self,sql):\n        cur = self._get_connect()\n        cur.execute(sql.encode(\"utf-8\"))\n        self.conn.commit()\n        self.conn.close()\n\n    # if __name__==\"__main__\":\n    #     db=GetMysqlDate(\"localhost\",\"root\",\"root\",\"autotest\")\n    #     result = db.exec_not_query(\"INSERT INTO jk_elapsedtime_count (project_name, suite_f_name, suite_s_name, elapsed_time) VALUES ('testpro', 'testfather', 'testson', 0.311)\")\n    #     print result\n","sub_path":"common/getmysqldata.py","file_name":"getmysqldata.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"240136015","text":"def read_int(prompt, list_of_choices):\n    \"\"\"\n    Let the user pick an item out of a menu of numbered choices 
(starting with 1)\n    :param prompt: The prompt to display to the user\n    :param list_of_choices: List of strings representing the values to pick from\n    :return: the 1-based index of the user's choice\n    \"\"\"\n    selected_value = None\n    while selected_value is None:\n        for idx, choice_name in enumerate(list_of_choices, start=1):\n            print(str(idx) + \") \" + choice_name)\n        selected_str = input(prompt)\n        try:\n            selected_value = int(selected_str)\n            if (selected_value < 1) or (selected_value > len(list_of_choices)):\n                print(\"Please enter a number between %i and %i.\\n\" % (1, len(list_of_choices)))\n                selected_value = None\n        except ValueError:\n            print(\"Unable to parse \\\"%s\\\" as a number.\\n\" % selected_str)\n    return selected_value\n","sub_path":"sim/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"181604552","text":"# coding=UTF-8\n\n\n\nfrom django.shortcuts import redirect\nfrom django.template.context_processors import csrf\nfrom django.utils.html import MLStripper\nfrom django.utils.translation import ugettext as _\nfrom django.views import generic\n\nfrom app.models.conversation import Conversation, Message\nfrom app.models.personne import Personne, Activite\nfrom app.views.common import LoginRequiredMixin\n\n\nclass PostMessageView(LoginRequiredMixin, generic.TemplateView):\n    \"\"\"\n    View used by the views that handle sending messages via the forms\n    At the time of writing these are index.py, contact_detail.py and index.py\n    \"\"\"\n    url_redirect = None\n\n    def get_context_data(self, **kwargs):\n        context = super(PostMessageView, self).get_context_data(**kwargs)\n        if self.request.session.get('message', None):\n            context['message'] = self.request.session['message']\n            del self.request.session['message']\n        return context\n\n    def post(self, request, *args, **kwargs):\n        # Reject all hack attempts:\n        if not request.POST.get('csrfmiddlewaretoken'):\n            return redirect(self.url_redirect)\n\n        # ! Hard-coded comparison, I don't know how to do it differently:\n        if request.POST['csrfmiddlewaretoken'] != csrf(request)['csrf_token']:\n            return redirect(self.url_redirect)\n\n        if request.POST.get('message'):\n            dst = None\n\n            # p = Personne linked to the current User\n            p = Personne.objects.get(user=self.request.user)\n\n            # c = current conversation\n            c = None\n\n            # Clean up the message:\n            s = MLStripper()\n            s.feed(request.POST['message'])\n            message = s.get_data()\\\n                .replace('\\n', ' ').replace('\\r', '')\n\n            # ! two kinds of post are possible here: Activite or Conversation
\n            if request.POST.get('id_activite'):\n                try:\n                    id_activite = int(request.POST['id_activite'])\n                except ValueError:\n                    id_activite = None\n                if isinstance(id_activite, int):\n                    a = Activite.objects.get(pk=id_activite)\n                    if a.relation:\n                        dst = a.relation.src\n                    else:\n                        dst = a.travel.personne\n            elif request.POST.get('id_conversation'):\n                try:\n                    id_conversation = int(request.POST['id_conversation'])\n                except ValueError:\n                    id_conversation = None\n                if isinstance(id_conversation, int):\n                    c = Conversation.objects.get(pk=id_conversation)\n                    print(c)\n                    m = Message.objects.filter(conversations__exact=c) \\\n                        .values_list('src', 'dst')\n                    print(m)\n\n                    # reduce the groups of values to a single array:\n                    # -> ids of *all* participants *except* the current user\n                    m = [a for a in sorted(set().union(*m)) if a != p.pk]\n                    print(request.POST)\n                    print(m)\n\n                    # at the time of writing, only *two* participants\n                    # minus the current user:\n                    if len(m) == 1:\n                        dst = Personne.objects.get(pk=m[0])\n\n            elif request.POST.get('id_personne'):\n                try:\n                    id_personne = int(request.POST['id_personne'])\n                except ValueError:\n                    id_personne = None\n                if isinstance(id_personne, int):\n                    try:\n                        dst = Personne.objects.get(pk=id_personne)\n                        # (!) Security work remains to be done: check that\n                        # dst really is a contact of the current User\n                    except Personne.DoesNotExist: # hack\n                        dst = None\n\n            if isinstance(dst, Personne):\n                # Ok, we know who to write to:\n                # the conversation may already have been computed above -> check:\n                if isinstance(c, Conversation):\n                    m = Message.objects.create(src=p, dst=dst,\n                                               message=message)\n                    m.save()\n                    c.messages.add(m)\n                    c.save()\n                else: # Send a message to the other person:\n                    Conversation.add_message(p, dst, message)\n\n                self.request.session['message'] = (\n                    _('Message sent'),\n                    _('Click to hide'))\n\n        elif request.POST.get('message_id'):\n            # no message in the POST, message_id = \"mark as read\"\n            try:\n                message_id = int(request.POST['message_id'])\n            except ValueError:\n                message_id = None\n            if isinstance(message_id, int):\n                try:\n                    m = Message.objects.get(pk=message_id)\n                    m.is_read = True\n                    m.save()\n                except Message.DoesNotExist:\n                    pass\n\n        return redirect(self.url_redirect)\n\n\n","sub_path":"app/views/my_home/post_message_view.py","file_name":"post_message_view.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"126405307","text":"\"\"\"Calculates a Checksum from CSV\"\"\"\n\nimport csv\n\ndef get_values(row):\n    \"\"\"Returns the quotient of the one pair of values in the row where one evenly divides the other\"\"\"\n    for curr_num in row:\n        for num in row:\n            if num != curr_num:\n                mod_check = int(curr_num)%int(num)\n                if mod_check == 0:\n                    value = int(curr_num)/int(num)\n                    return value\n\ndef main():\n    \"\"\"Calculates a Checksum from CSV\"\"\"\n    with open(\"../puzzle_input.csv\") as file:\n        csv_reader = csv.reader(file, delimiter='\\t')\n        num_list = []\n        for row in csv_reader:\n            value = get_values(row)\n            num_list.append(int(value))\n        checksum = sum(num_list)\n        print(\"Checksum: %s\" % checksum)\n\nif __name__ == '__main__':\n    main()\n    ","sub_path":"day2/python/solve_puzzle_2.py","file_name":"solve_puzzle_2.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"223722160","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/10/10 16:41\n# @Author : cqh\n# @file : 
function_global.py\n# @Software : PyCharm\nx = 50\n\ndef func():\n    global x\n    print('x is', x)\n    x = 2\n    print('Changed global x to', x)\n\nfunc()\nprint('Value of x is', x)\n","sub_path":"py_cqh/function_global.py","file_name":"function_global.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"221199144","text":"# -*- coding: utf-8 -*-\n\nfrom app import utils\nfrom app.model import Base\nfrom app.model.Base import STATUS_INVALID, STATUS_VALID\nfrom peewee import MySQLDatabase, Model, BigIntegerField,CharField, BooleanField, \\\n    DecimalField, IntegerField, TextField, DateField, DateTimeField, fn\n\nfrom playhouse.shortcuts import model_to_dict, dict_to_model\n\nfrom datetime import datetime\n\n\n# Institution management\nclass Administrator(Base.BaseModel):\n    class Meta:\n        db_table = 'administrator' \n\n    username = CharField()\n    password = CharField()\n    role = IntegerField()\n    options = CharField()\n    name = CharField()\n    sex = IntegerField() \n    province = CharField()\n    city = CharField()\n    country = CharField()\n    mobile = CharField() \n    email = CharField()\n    birthday = DateField(default='1970-01-01')\n    education = CharField()\n    lastloginip = CharField()\n    lastdevice = CharField()\n    lastlogintime = DateTimeField()\n    status = IntegerField(default=STATUS_VALID)\n\n\n\ndef GetRecordByUsername(username):\n    return Administrator.get_or_none(Administrator.username == username, Administrator.status == STATUS_VALID)\n    \n\ndef IsExistId(userid):\n    return Administrator.get_or_none(Administrator.id == userid, Administrator.status == STATUS_VALID)\n\n\ndef UpdateRecrodByUsername(mDict):\n    Administrator.update(**mDict).where(Administrator.username==mDict['username']).execute()","sub_path":"server/app/model/Administrator.py","file_name":"Administrator.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"514974130","text":"class Zone():\n    \"\"\"\n    Class representing Zones (Entrance, Paths or points of view)\n    \"\"\"\n    def __init__(self, idx, category):\n        \"\"\"Simple constructor\"\"\"\n        self.id = idx\n        \n        if category == 1: self.category = 'point of view'\n        elif category == 2: self.category = 'entrance'\n        elif category == 3: self.category = 'middle point'\n\n        self.connections = []\n        self.nb_connections = 0\n        \n        self.max_connections = -1\n        self.closest_entrance_cost = -1\n        # compare the mapped name, not the numeric code, as reinitialize() does\n        self.connected_to_entrance = (self.category == 'entrance')\n        \n    def reinitialize(self):\n        \"\"\"Reset zone to its initial state (remove connections)\"\"\"\n        self.connections = []\n        self.nb_connections = 0\n        self.connected_to_entrance = (self.category == 'entrance')\n\n    def __str__(self):\n        \"\"\"Print a lot of stuff for debugging purposes\"\"\"\n        print(\"id:\", self.id, \"cat:\", self.category, \"nb_connections:\", self.nb_connections, \"/\", self.max_connections, \"connected_to_entrance:\", self.connected_to_entrance)\n        return \"\"\n    \n    def find_closest_entrance_cost(self, entrances, costs):\n        \"\"\"\n        Since every (zone,zone) pair has a valid cost, we find the closest one\n        for each zone. 
It will be used as a heuristic\n        \"\"\"\n        if self.category == 'entrance':\n            self.closest_entrance_cost = 0\n            return\n        \n        minimum = costs[self.id, entrances[0].id]\n        \n        for x in entrances:\n            if minimum > costs[self.id, x.id]:\n                minimum = costs[self.id,x.id] \n        self.closest_entrance_cost = minimum\n\n    def is_valid(self):\n        \"\"\"Verify the validity of a zone\"\"\"\n        # the shared checks must hold together with the category-specific rule,\n        # so the or-group is parenthesized (and binds tighter than or)\n        return (\n            (self.connected_to_entrance) and\n            (self.nb_connections <= self.max_connections) and\n            (\n                (self.category == 'point of view' and self.nb_connections == 1) or\n                (self.category == 'entrance' and self.nb_connections > 0) or\n                (self.category == 'middle point' and self.nb_connections >= 2)\n            )\n        )\n","sub_path":"src/zone.py","file_name":"zone.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"185856135","text":"\"\"\"\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\nimport os\nfrom resource_management import *\nfrom subprocess import *\n\ndef check_rc(rc,stdout=None,stderr=None):\n    if rc == 2:\n        Logger.error(\"Code 2: Invalid argument\\n%s\" % stderr)\n        raise InvalidArgument(stderr)\n    if rc == 3:\n        Logger.error(\"Code 3: Component is Not Running\\n%s\" % stderr)\n        raise ComponentIsNotRunning(stderr)\n    if rc > 0:\n        Logger.error(\"Code %d: Undefined error\\n%s\" % (rc,stderr))\n        raise Fail(stderr)\n\ndef execute_sudo_krb(cmd,user=None,principal=None,keytab=None,keytab_cache=None,input=None,shell=False):\n    import params\n    \n    secure = params.security_enabled\n    user = user or params.hdfs_user\n    principal = principal or params.hdfs_principal_name\n    keytab = keytab or params.hdfs_user_keytab\n    keytab_cache = keytab_cache or params.kerberos_cache_file\n    \n    auth_token=None\n    \n    if secure:\n        import kerberosWrapper\n        auth_token = kerberosWrapper.krb_wrapper(principal,keytab,keytab_cache)\n        os.environ['KRB5CCNAME'] = keytab_cache\n    else:\n        cmd_aux = [\"su\",\"-s\",\"/bin/bash\",user,\"-c\"]\n        cmd_aux.append(' '.join(cmd))\n        cmd = cmd_aux\n    Logger.info(\"Executing %s\" % str(cmd)) \n    executed=Popen(cmd,stdin=PIPE,stdout=PIPE,stderr=PIPE,shell=False)\n    out,err=executed.communicate(input=input)\n    if secure and auth_token:\n        auth_token.destroy()\n\n    return out,err,executed.returncode\n    \n","sub_path":"KEEDIO/1.4/services/IPA/package/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"648762647","text":"import json\n\nfrom bson import ObjectId\nfrom flask import request\n\nfrom api import Data\nfrom database.data.child import lookup, delete, edit\n\n\nclass Child(Data):\n    def patch(self, child_id):\n        operations = [\"add\", \"remove\"]\n\n        commands = request.values.to_dict()\n        if len(commands) < 1:\n            
return [\"submit changes in the form of\", {\"op\": \"val\", \"args\": \"val\"}], 400\n\n if len(commands) == 1:\n for key in commands:\n commands = json.loads(key)\n break\n\n for instruction in commands:\n\n try:\n assert isinstance(instruction, dict)\n except AssertionError:\n return {\"must pass commands as a dictionary, eg\": [{\"op\": \"1\"}, {\"op\": \"2\"}]}, 400\n if \"op\" not in instruction:\n # not a valid instruction\n return {\"no keyword op in received:\": instruction}, 400\n if instruction['op'] not in operations:\n return {\"valid operations are\": operations}, 400\n\n if instruction['op'] == 'add':\n if 'id' not in instruction:\n return {\"need field \\'id\\'\": instruction}, 400\n else:\n items = lookup(child_id)[0]['items']\n print(\"items is\", items)\n items.append(ObjectId(instruction['id']))\n\n return edit(child_id, items=items)\n\n if instruction['op'] == 'remove':\n if 'id' not in instruction:\n return {\"need field \\'id\\'\": instruction}, 400\n else:\n items = lookup(child_id)[0]['items']\n print(\"items is\", items)\n try:\n items.remove(ObjectId(instruction['id']))\n except ValueError:\n return {\"not in list\": instruction['id']}, 404\n\n return edit(child_id, items=items)\n\n def delete(self, child_id):\n\n return delete(child_id)\n\n def get(self, child_id):\n (rv, x) = lookup(child_id)\n\n return rv, x\n","sub_path":"backend/stocklist-backend/api/data/child.py","file_name":"child.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"254136272","text":"import socket\nimport unittest\nfrom os import path\n\nimport walkoff.appgateway\nimport walkoff.case.database as case_database\nimport walkoff.case.subscription as case_subscription\nimport walkoff.config.config\nimport walkoff.controller\nimport walkoff.core.multiprocessedexecutor\nfrom walkoff.core.multiprocessedexecutor.multiprocessedexecutor import MultiprocessedExecutor\nfrom tests import config\nfrom tests.util.mock_objects import *\n\ntry:\n from importlib import reload\nexcept ImportError:\n from imp import reload\n\n\nclass TestWorkflowManipulation(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n walkoff.appgateway.cache_apps(config.test_apps_path)\n walkoff.config.config.load_app_apis(apps_path=config.test_apps_path)\n walkoff.config.config.num_processes = 2\n MultiprocessedExecutor.initialize_threading = mock_initialize_threading\n MultiprocessedExecutor.wait_and_reset = mock_wait_and_reset\n MultiprocessedExecutor.shutdown_pool = mock_shutdown_pool\n walkoff.controller.controller.initialize_threading()\n\n def setUp(self):\n self.controller = walkoff.controller.controller\n self.controller.workflows = {}\n self.controller.load_playbooks(\n resource_collection=path.join(\".\", \"tests\", \"testWorkflows\", \"testGeneratedWorkflows\"))\n self.controller.load_playbook(\n resource=path.join(config.test_workflows_path, 'simpleDataManipulationWorkflow.playbook'))\n self.id_tuple = ('simpleDataManipulationWorkflow', 'helloWorldWorkflow')\n self.testWorkflow = self.controller.get_workflow(*self.id_tuple)\n self.testWorkflow.set_execution_uid('some_uid')\n case_database.initialize()\n\n def tearDown(self):\n self.controller.workflows = None\n case_database.case_db.tear_down()\n case_subscription.clear_subscriptions()\n reload(socket)\n\n @classmethod\n def tearDownClass(cls):\n walkoff.appgateway.clear_cache()\n walkoff.controller.controller.shutdown_pool()\n\n def 
test_pause_and_resume_workflow(self):\n self.controller.load_playbook(resource=path.join(config.test_workflows_path, 'pauseWorkflowTest.playbook'))\n\n uid = None\n result = dict()\n result['paused'] = False\n result['resumed'] = False\n\n def workflow_paused_listener(sender, **kwargs):\n result['paused'] = True\n self.controller.resume_workflow(uid)\n\n WalkoffEvent.WorkflowPaused.connect(workflow_paused_listener)\n\n def workflow_resumed_listener(sender, **kwargs):\n result['resumed'] = True\n\n WalkoffEvent.WorkflowResumed.connect(workflow_resumed_listener)\n\n def pause_resume_thread():\n self.controller.pause_workflow(uid)\n return\n\n def action_1_about_to_begin_listener(sender, **kwargs):\n threading.Thread(target=pause_resume_thread).start()\n\n WalkoffEvent.WorkflowExecutionStart.connect(action_1_about_to_begin_listener)\n\n uid = self.controller.execute_workflow('pauseWorkflowTest', 'pauseWorkflow')\n self.controller.wait_and_reset(1)\n self.assertTrue(result['paused'])\n self.assertTrue(result['resumed'])\n\n def test_change_action_input(self):\n arguments = [{'name': 'call', 'value': 'CHANGE INPUT'}]\n\n result = {'value': None}\n\n def action_finished_listener(sender, **kwargs):\n result['value'] = kwargs['data']\n\n WalkoffEvent.ActionExecutionSuccess.connect(action_finished_listener)\n\n self.controller.execute_workflow('simpleDataManipulationWorkflow', 'helloWorldWorkflow',\n start_arguments=arguments)\n self.controller.wait_and_reset(1)\n self.assertDictEqual(result['value'],\n {'result': 'REPEATING: CHANGE INPUT', 'status': 'Success'})\n","sub_path":"tests/test_workflow_manipulation.py","file_name":"test_workflow_manipulation.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"183778398","text":"\"\"\"\nYou are given an array (which will have a length of at least 3, \nbut could be very large) containing integers. The array is either \nentirely comprised of odd integers or entirely comprised of even integers \nexcept for a single integer N. 
Write a method that takes the array \nas an argument and returns this \"outlier\" N.\n\"\"\"\n\ndef find_outlier(integers):\n    odd_count = 0\n    even_count = 0\n    the_intruder = 0\n    for digit in integers:\n        if digit%2 == 0:\n            even_count+=1\n        else:\n            odd_count+=1\n    # when evens are the minority the outlier is even, otherwise it is odd\n    if even_count < odd_count:\n        for digit in integers:\n            if digit%2 == 0:\n                the_intruder = digit\n    else:\n        for digit in integers:\n            if digit%2 != 0:\n                the_intruder = digit\n    return the_intruder\n\nprint(find_outlier([160, 3, 1719, 19, 11, 13, -21]))","sub_path":"chase_the_intruder.py","file_name":"chase_the_intruder.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"265131909","text":"from pyspark.ml import Pipeline\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.feature import Binarizer\n\nif __name__ == \"__main__\":\n    spark = SparkSession\\\n        .builder\\\n        .appName(\"BinarizerExample\")\\\n        .getOrCreate()\n\n    continuousDataFrame = spark.createDataFrame([(4,)], [ \"feature\"])\n    binarizer = Binarizer(threshold=5, inputCol=\"feature\", outputCol=\"binarized_feature\")\n    pipeline = Pipeline(stages=[binarizer])\n    pipeline = pipeline.fit(continuousDataFrame)\n    pipeline.write().overwrite().save(\"binarizer\")","sub_path":"examples/binarizer/bin_train.py","file_name":"bin_train.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"499233750","text":"\"\"\"blossomac URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\nfrom datascience import views\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    url(r'^$', views.home, name='index'),\n    url(r'^enterprise/', views.join_blossom, name='fasttrack'),\n    url(r'^immersive/', views.become_partner, name='immersive'),\n    url(r'^about/', views.about_us, name='about'),\n    url(r'^privacypolicy/', views.privacy_policy, name='privacypolicy'),\n    url(r'^terms/', views.terms_services, name='terms'),\n    url(r'^faqs/', views.faqs, name='faqs')\n]\n ","sub_path":"blossomac/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"120690711","text":"####\n# CONFIGURATION\n####\nBUCKET_LANDING = \"djm2-lake-landing\"\nBUCKET_CURATED = \"djm2-lake-curated\"\n\n# Read the JSON data as a data frame\nlanded_data = \"s3://\"+BUCKET_LANDING+\"/fda/2019-08-10/drug/label/part-*\"\ndataframe = spark.read.json(landed_data)\n\n# Store refined data as Parquet\ndataframe.write.mode(\"overwrite\").parquet(\"s3://\"+BUCKET_CURATED+\"/fda/drug/label/\")\n","sub_path":"03_FDA_Labels/fda.02.curate.py","file_name":"fda.02.curate.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"96835050","text":"from member.constants import UserRoles\n\n\ndef roles(request):\n    return {\n        'USER': UserRoles.USER,\n        'MEMBRU_ASPIRANT': UserRoles.MEMBRU_ASPIRANT,\n        'TEMERAR': UserRoles.TEMERAR,\n        'EXPLORATOR': UserRoles.EXPLORATOR,\n        'SENIOR': UserRoles.SENIOR,\n        'LIDER': UserRoles.LIDER,\n        'LIDER_ASISTENT': UserRoles.LIDER_ASISTENT,\n        'VOLUNTAR': UserRoles.VOLUNTAR,\n\n        'STAFF': UserRoles.STAFF,\n        'ADMIN': UserRoles.ADMIN,\n    }\n","sub_path":"web/member/context_processor/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"257642714","text":"from flask import Flask, request, jsonify\nfrom flask_restful import Resource, Api\nimport sqlite3\n\n\napp = Flask(__name__)\napi = Api(app)\n\n\nclass MenuSelection(Resource):\n    def get(self,id):\n        try:\n            dbResponse = getFromDataBase(id)\n            MenuSelectionX = [{\n                \"id\" : dbResponse[0], #this is the same as the passed ID value\n                \"name\" : dbResponse[1]\n            }]\n\n            return jsonify({\"success\": True, \"MenuSection\" : MenuSelectionX})\n        except:\n            \n            return jsonify({\"success\": False})\n\n    \n    def post(self,id):\n        try:\n            name = request.json['name'] #GRAB THE NAME FIELD FROM THE JSON REQUEST\n            getFromDataBase(id) #if the ID is not in the database, this will throw \n            updateInDataBase(id,name)\n            MenuSelectionX = [{\n                \"id\" : id,\n                \"name\" : name\n            }]\n\n            return jsonify({\"success\": True, \"MenuSection\" : MenuSelectionX})\n        except:\n            \n            return jsonify({\"success\": False})\n\n    def delete(self,id):\n        try:\n            getFromDataBase(id) #IF THE ENTRY IS NOT IN THE DATABASE, THIS WILL THROW AN EXCEPTION SO THE DELETION IS FALSE AS IT DID NOT HAPPEN \n            deleteFromDataBase(id)\n            return jsonify({\"success\": True})\n        except:\n            return jsonify({\"success\": False})\n\n\nclass AllSections(Resource):\n    def get(self):\n\n        try:\n            dbResponse = getAllFromDataBase()\n            MenuSelectionX = []\n            #DbResponse WILL CONTAIN ALL ENTRIES IN THE DATABASE, SO LOOP THROUGH AND APPEND TO THE OUTPUT VARIABLE MenuSelectionX\n            for entry in dbResponse:\n                
MenuSelectionX.append({\n                    \"id\" : entry[0], #this is the same as the passed ID value\n                    \"name\" : entry[1]\n                })\n\n            return jsonify({\"success\": True, \"MenuSection\" : MenuSelectionX})\n        except:\n            \n            return jsonify({\"success\": False})\n\n\n    def put(self):\n\n        try:\n            name = request.json['name']\n            newId = putInDataBase(name)\n            MenuSelectionX = [{\n                \"id\" : newId,\n                \"name\" : name\n            }]\n\n            return jsonify({\"success\": True, \"MenuSection\" : MenuSelectionX})\n        except:\n            return jsonify({\"success\": False})\n\n\n\n#DEFINE API ROUTES\napi.add_resource(MenuSelection, \"/menusection/<int:id>\") #USE THIS API WHEN ADDRESSING A GIVEN ID \napi.add_resource(AllSections, \"/menusection\") #USE THIS API WHEN AN ID IS NOT APPROPRIATE \n\n\n#DATABASE INTERACTIONS\ndef create_table():\n    conn = sqlite3.connect(\"Menu.db\") #CONNECT TO THE DATABASE\n    c = conn.cursor() #CURSOR\n    c.execute('CREATE TABLE IF NOT EXISTS MenuSections(id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)') #SQL CREATE TABLE STATEMENT \n    c.close()\n    conn.close()\n\n\ndef putInDataBase(name):\n    conn = sqlite3.connect(\"Menu.db\") \n    c = conn.cursor()\n    c.execute(\"INSERT INTO MenuSections (name) VALUES(?)\",(name,)) #SQL INSERT STATEMENT\n    c.execute(\"select last_insert_rowid()\")\n    newID = c.fetchall()[0][0] #GRAB THE NEW ID TO RETURN TO THE CLIENT \n    conn.commit()\n    c.close()\n    conn.close()\n    return newID\n\ndef updateInDataBase(id,name):\n    conn = sqlite3.connect(\"Menu.db\") \n    c = conn.cursor()\n    c.execute(\"Update MenuSections SET name=(?) WHERE id= (?)\",(name,id,)) #SQL UPDATE ROW STATEMENT\n    conn.commit()\n    c.close()\n    conn.close()\n    \n\ndef deleteFromDataBase(id):\n    conn = sqlite3.connect(\"Menu.db\") \n    c = conn.cursor()\n    c.execute(\"DELETE FROM MenuSections WHERE id=(?)\",(id,)) #SQL DELETE STATEMENT\n    conn.commit()\n    c.close() #CLOSE THE CONNECTION LIKE THE OTHER HELPERS\n    conn.close()\n    \ndef getFromDataBase(id):\n    conn = sqlite3.connect(\"Menu.db\") \n    c = conn.cursor()\n    c.execute(\"SELECT * FROM MenuSections WHERE id=(?)\",(id,)) #SQL SELECT STATEMENT\n    row = c.fetchall()[0] #FETCH BEFORE CLOSING THE CONNECTION\n    c.close()\n    conn.close()\n    return row\n\ndef getAllFromDataBase():\n    conn = sqlite3.connect(\"Menu.db\") \n    c = conn.cursor()\n    c.execute(\"SELECT * FROM MenuSections\") #SQL SELECT ALL STATEMENT\n    rows = c.fetchall()\n    c.close()\n    conn.close()\n    return rows\n    \n\n#CREATE TABLE IF NEEDED ON LAUNCH OF THE CLIENT \ncreate_table()\n\nif __name__ == '__main__':\n    app.run(debug=True) \n","sub_path":"MenuAPI.py","file_name":"MenuAPI.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"132652326","text":"import logging\nfrom io import StringIO\nimport requests\nimport csv\n\nfrom plan.api.data import Record, RecordException\nfrom plan.externals import gis\nfrom plan.models import Vehicle, Route\n\nlogger = logging.getLogger('planndit.externals.vehicles')\nwebfleet_url = 'https://csv.business.tomtom.com/extern'\napikey = '95419BAC-2AAE-11E3-A0F0-8FE0ADFC9456'\n\n\ndef vehicle_sync(user):\n    if user.account.webfleet_account is None or user.account.webfleet_account == '':\n        return 0\n    reader = webfleet_request(user.account, 'showObjectReportExtern')\n\n    vehicles = Vehicle.objects.filter(account=user.account)\n    vehicles_map = {}\n    for vehicle in vehicles:\n        vehicles_map[vehicle.external_id] = vehicle\n\n    new_vehicles = []\n    update_vehicles = []\n    update_locations = []\n\n    for row in reader:\n        try:\n            vehicle = vehicles_map[row['objectno']]\n            if vehicle is not None:\n                if vehicle.external_id != row['objectno'] or vehicle.name != row['objectname']:\n                    update_vehicles += [[vehicle, row]]\n                else:\n                    update_locations += 
[[vehicle, row]]\n vehicles_map.pop(row['objectno'])\n except KeyError as e:\n new_vehicles += [row]\n pass\n\n object_update = 0\n\n for vehicleData in new_vehicles:\n vehicle = Vehicle(account=user.account)\n vehicle = update_vehicle(vehicle=vehicle, data=vehicleData)\n vehicle.save()\n object_update += 1\n\n for vehicleData in update_vehicles:\n vehicle = vehicleData[0]\n vehicle = update_vehicle(vehicle=vehicle, data=vehicleData[1])\n vehicle.save()\n object_update += 1\n\n for vehicleData in update_locations:\n vehicle = vehicleData[0]\n vehicle = update_location(vehicle=vehicle, data=vehicleData[1])\n vehicle.save()\n\n\n # for vehicleData in vehicles_map:\n # todo to be deleted\n\n return object_update\n\n\ndef send_orders(account, route_id):\n route = Route.objects.filter(account=account, id=route_id)\n if route.count() != 1:\n return\n route = route[0]\n\n orders = route.orders.filter(location__is_valid=True).all()\n for number, order in enumerate(orders):\n description = \"#{number} {description}\".format(number=number + 1, description=order.commentary)\n description += '\\n\\rAddress: {address}'.format(address=order.location.address)\n for item in order.orderitem_set.all():\n description += \"\\n\\r{key}: {value}\".format(key=item.key, value=item.value)\n data = {\n 'objectno': route.vehicle.external_id,\n 'orderid': order.id,\n 'ordertext': description,\n 'ordertype': 3,\n 'longitude': round(order.location.longitude * 1000000),\n 'latitude': round(order.location.latitude * 1000000),\n 'city': order.location.city,\n 'zip': order.location.postcode,\n 'orderdate': route.date.strftime(\"%d/%m/%y\") + \"'TZ\", # todo format\n }\n webfleet_request(account, 'sendDestinationOrderExtern', data)\n route.status = 'Sent'\n route.save()\n\n\ndef webfleet_request(account, action, params=None):\n if not params:\n params = {}\n params.update(get_auth(account))\n params['lang'] = 'en'\n params['action'] = action\n result = requests.get(webfleet_url, params)\n if result.headers.get('X-Webfleet-Errorcode'):\n raise RecordException(Record.serialize(False, result.headers.get('X-Webfleet-Errormessage')))\n reader = parse_csv(result.text)\n return reader # for row in reader: row['col_name']\n\n\ndef parse_csv(response):\n f = StringIO(response)\n reader = csv.DictReader(f, delimiter=';')\n return reader\n\n\ndef get_auth(account):\n return {\n 'account': account.webfleet_account,\n 'username': account.webfleet_username,\n 'password': account.webfleet_password,\n 'apikey': apikey\n }\n\n\ndef update_vehicle(vehicle, data):\n vehicle.name = data['objectname']\n vehicle.external_id = data['objectno']\n return update_location(vehicle, data)\n\n\ndef update_location(vehicle, data):\n latitude = int(data['latitude_mdeg']) / 1000000\n longitude = int(data['longitude_mdeg']) / 1000000\n location = gis.reverse_geocoding(latitude, longitude)\n vehicle.location = location\n return vehicle","sub_path":"plan/externals/webfleet.py","file_name":"webfleet.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"639984171","text":"import os\nimport pickle\nimport tempfile\nimport subprocess\n\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom keras.models import Sequential, Model , load_model\nfrom keras.layers import Dense, Input, SimpleRNN, LSTM, Dropout\nfrom keras.utils import to_categorical\nfrom keras import optimizers\n\nfrom basescript import BaseScript\nfrom 
diskarray import DiskArray\n\nclass StringEmbeddingsScript(BaseScript):\n    CHAR_NONE = '\\x00'\n    CHAR_START = '\\x01'\n    CHAR_END = '\\x02'\n\n    def create_model(self, num_units, word_len, num_unique_chars):\n        input_shape = (word_len, num_unique_chars)\n\n        model = Sequential()\n        model.add(LSTM(num_units, input_shape=input_shape, unroll=True))\n        model.add(Dense(num_unique_chars, activation='softmax'))\n\n        model.compile(optimizer=optimizers.Adam(lr=0.002),\n                      loss='categorical_crossentropy',\n                      metrics=['mse'])\n        return model\n\n    def execute_cmd(self, cmd):\n        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n        result = p.stdout.read().strip()\n\n        return result.decode('utf-8')\n\n    def get_char_to_int(self, fpath):\n        chars_cmd = \"fold -w1 {0} | sort -u\".format(fpath)\n        chars = self.execute_cmd(chars_cmd)\n        chars = chars.split('\\n')\n\n        max_len_cmd = 'cat {0} | py -x \"len(x)\" | sort -n | tail -1'.format(fpath)\n        max_len = self.execute_cmd(max_len_cmd)\n        max_len = int(max_len) + 2 # adding 2 for start and end chars\n        self.log.info('calculating max length is done')\n\n        nwords_cmd = 'cat {0} | py -x \"len(x)\" | sort -n | wc -l'.format(fpath)\n        nwords = self.execute_cmd(nwords_cmd)\n        nwords = int(nwords)\n        self.log.info('calculating nwords is done')\n\n        chars = [self.CHAR_NONE, self.CHAR_START, self.CHAR_END] + chars\n        charmap = { c: i for i, c in enumerate(chars) }\n        nchars = len(chars)\n\n        return max_len, nchars, nwords, charmap\n\n    def load_data(self, max_len, nchars, nwords, charmap):\n        char_none = to_categorical(charmap[self.CHAR_NONE], num_classes=nchars)\n        data = DiskArray(self.args.training_data, shape=(nwords, max_len, nchars), dtype=np.float32)\n        labels = DiskArray(self.args.labels_data, shape=(nwords, nchars), dtype=np.float32)\n\n        f = open(self.args.text)\n        for i, line in enumerate(f):\n            line = line.strip()\n            w = line[:-1]\n            last_char = line[-1]\n            w = '%s%s%s' % (self.CHAR_START, w, self.CHAR_END)\n            w = [to_categorical(charmap[x], num_classes=nchars) for x in w]\n            w = w + ([char_none] * (max_len - len(w)))\n            data[i] = w\n            labels[i] = to_categorical(charmap[last_char], num_classes=nchars)\n\n        self.log.info('generating vectors is done')\n        data.flush()\n        labels.flush()\n        return data, labels\n\n    def get_test_data(self, max_len, nchars, nwords, words, charmap):\n        char_none = to_categorical(charmap[self.CHAR_NONE], num_classes=nchars)\n        data = np.zeros(shape=(nwords, max_len, nchars), dtype=np.float32)\n        labels = np.zeros(shape=(nwords, nchars), dtype=np.float32)\n\n        for i in range(nwords):\n            w = words[i][:-1]\n            last_char = words[i][-1]\n            w = '%s%s%s' % (self.CHAR_START, w, self.CHAR_END)\n            w = [to_categorical(charmap[x], num_classes=nchars) for x in w]\n            w = w + ([char_none] * (max_len - len(w)))\n            data[i] = w\n            labels[i] = to_categorical(charmap[last_char], num_classes=nchars)\n\n        return data, labels\n\n    def run(self):\n\n        fpath = self.args.text\n\n        max_len, nchars, nwords, charmap = self.get_char_to_int(fpath)\n\n        disk_array = DiskArray(self.args.out_f, shape=(0,), dtype=[('vec', np.float32, 128)])\n        if not os.path.exists(self.args.training_data):\n            data, labels = self.load_data(max_len, nchars, nwords, charmap)\n        else:\n            data = DiskArray(self.args.training_data, dtype=np.float32)\n            labels = DiskArray(self.args.labels_data, dtype=np.float32)\n\n        if not os.path.exists(self.args.model_name):\n            model = self.create_model(128, max_len, nchars)\n            self.log.info('Started training the model')\n            history = model.fit(data[:], labels[:], epochs=self.args.epochs, batch_size=128)\n            
plt.plot(history.history['loss'])\n plt.savefig(self.args.image_name)\n else:\n model = load_model(self.args.model_name)\n\n model.save(self.args.model_name)\n\n self.log.info('Accessing the layer weights')\n new_model = Sequential()\n new_model.add(LSTM(128, input_shape=(max_len, nchars), unroll=True))\n weights = model.layers[0].get_weights()\n new_model.set_weights(weights)\n\n self.log.info('started predicting')\n for word in open(fpath):\n word = word.strip()\n test_data, test_lables = self.get_test_data(max_len, nchars, 1, [word], charmap)\n p_out = new_model.predict(test_data)\n disk_array.append((p_out[0],))\n\n disk_array.flush()\n\n def define_args(self, parser):\n parser.add_argument('text', help='input text file')\n parser.add_argument('training_data', help='training file')\n parser.add_argument('labels_data', help='labels file')\n parser.add_argument('epochs', type=int, help='num of epochs')\n parser.add_argument('model_name', help='model name to save')\n parser.add_argument('image_name', help='image name')\n parser.add_argument('out_f', help='out_f name')\n\nif __name__ == '__main__':\n StringEmbeddingsScript().start()\n","sub_path":"nn_scripts/chatra_rnn_new_modifications.py","file_name":"chatra_rnn_new_modifications.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"199101223","text":"import numpy as np\nimport cv2\n\ncanvas = np.zeros((300, 300, 3), dtype = \"uint8\")\n(centreX, centreY) = (canvas.shape[1] / 2, canvas.shape[0] / 2)\nwhite = (255, 255, 255)\n\nfor r in xrange(0, 175, 25):\n\tcv2.circle(canvas, (centreX, centreY), r, white)\n\ncv2.imshow(\"Canvas\", canvas)\ncv2.waitKey(0)\n\nfor i in xrange(0, 25):\n\tradius = np.random.randint(5, high = 200)\n\tcolour = np.random.randint(0, high = 256, size = (3,)).tolist()\n\tpt = np.random.randint(0, high = 300, size = (2,))\n\tcv2.circle(canvas, tuple(pt), radius, colour, -1)\n\ncv2.imshow(\"Canvas\", canvas)\ncv2.waitKey(0)\n","sub_path":"drawing/circles.py","file_name":"circles.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"653071277","text":"import sys\nfrom PyQt5 import QtWidgets as qtw\nfrom PyQt5 import QtGui as qtg\nfrom PyQt5 import QtCore as qtc\n\nclass SearchWidget(qtw.QWidget):\n\n submitted = qtc.pyqtSignal(str, bool)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setLayout(qtw.QFormLayout())\n self.term_input = qtw.QLineEdit()\n self.case_checkbox = qtw.QCheckBox('Match case')\n search_image = qtg.QPixmap('search.svg')\n gear_image = qtg.QPixmap('gear.svg')\n search_icon = qtg.QIcon(search_image)\n search_icon.addPixmap(gear_image, qtg.QIcon.Disabled)\n self.submit_button = qtw.QPushButton(\n 'Submit',\n icon=search_icon,\n clicked=self.on_submit,\n )\n self.submit_button.setEnabled(False)\n self.layout().addRow(qtw.QLabel(pixmap=search_image))\n self.layout().addRow('Search', self.term_input)\n self.layout().addRow(self.case_checkbox)\n self.layout().addRow('', self.submit_button)\n\n self.term_input.textChanged.connect(self.check_term)\n\n def check_term(self, term):\n\n if term:\n self.submit_button.setEnabled(True)\n else:\n self.submit_button.setEnabled(False)\n\n def on_submit(self):\n term = self.term_input.text()\n do_case = (\n self.case_checkbox.checkState() == qtc.Qt.Checked\n )\n self.submitted.emit(term, do_case)\n\n\nclass MainWindow(qtw.QMainWindow):\n\n def 
__init__(self):\n \"\"\"MainWindow constructor.\"\"\"\n super().__init__()\n\n # Central Widget\n\n self.textedit = qtw.QTextEdit()\n self.setCentralWidget(self.textedit)\n\n # Menu Bar\n\n menu = self.menuBar() # -> QMenuBar\n file_menu = menu.addMenu('File') # -> QMenu\n save_act = file_menu.addAction('Save', self.save) # -> QAction\n # Add keyboard shortcuts using QKeySequence constants\n file_menu.addAction(\n 'Open',\n self.open,\n # This uses a platform-appropriate Open shortcut:\n qtg.QKeySequence.Open\n )\n\n # Add a shortcut after the fact:\n save_act.setShortcut(qtg.QKeySequence.Save)\n file_menu.addSeparator()\n file_menu.addAction(\n 'Quit',\n self.close,\n qtg.QKeySequence.Quit\n )\n\n # ToolBar\n edit_toolbar = self.addToolBar('Edit')\n # To use an icon, add it in as the first argument\n\n copy_icon = qtg.QIcon(qtg.QPixmap('copy.svg'))\n cut_pixmap = qtg.QPixmap('cut.svg')\n undo_icon = qtg.QIcon(qtg.QPixmap('undo.svg'))\n\n qtg.QIcon.setThemeName('theme that doesnot exist')\n edit_toolbar.addAction(copy_icon, 'copy', self.textedit.copy)\n edit_toolbar.addAction(qtg.QIcon(cut_pixmap), 'cut', self.textedit.cut)\n edit_toolbar.addAction(qtg.QIcon(qtg.QPixmap('paste.svg')), 'paste', self.textedit.paste)\n edit_toolbar.addAction(qtg.QIcon.fromTheme('edit-undo', undo_icon), 'undo', self.textedit.undo)\n edit_toolbar.addAction(qtg.QIcon(qtg.QPixmap('redo.svg')), 'redo', self.textedit.redo)\n\n # Status bar\n\n self.statusBar().showMessage('Welcome to my text editor', 5000)\n\n # Dockable widget\n search_dock = qtw.QDockWidget('Search')\n self.addDockWidget(\n qtc.Qt.RightDockWidgetArea,\n search_dock\n )\n # You can prevent a dock from floating, closing\n # Or moving by leaving out any of these items:\n search_dock.setFeatures(\n qtw.QDockWidget.DockWidgetClosable |\n qtw.QDockWidget.DockWidgetMovable |\n qtw.QDockWidget.DockWidgetFloatable\n )\n\n search_widget = SearchWidget()\n search_dock.setWidget(search_widget)\n search_widget.submitted.connect(self.search)\n\n self.show()\n\n def save(self):\n text = self.textedit.toPlainText()\n filename, _ = qtw.QFileDialog.getSaveFileName()\n if filename:\n with open(filename, 'w') as handle:\n handle.write(text)\n self.statusBar().showMessage(f'Saved to {filename}')\n\n def open(self):\n filename, _ = qtw.QFileDialog.getOpenFileName()\n if filename:\n with open(filename, 'r') as handle:\n text = handle.read()\n self.textedit.clear()\n self.textedit.insertPlainText(text)\n self.textedit.moveCursor(qtg.QTextCursor.Start)\n self.statusBar().showMessage(f'Editing {filename}')\n\n def search(self, term, case_sensitive=False):\n if case_sensitive:\n cur = self.textedit.find(\n term,\n qtg.QTextDocument.FindCaseSensitively\n )\n else:\n cur = self.textedit.find(term)\n if not cur:\n self.statusBar().showMessage('No matches Found', 2000)\n\n\nif __name__ == '__main__':\n app = qtw.QApplication(sys.argv)\n mw = MainWindow()\n sys.exit(app.exec())\n","sub_path":"PyQtIconsAndImages/texteditor.py","file_name":"texteditor.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"78562085","text":"from tapis_cli.display import Verbosity\nfrom tapis_cli.search import Argdef, argmod, argtype, optionize\n\n__all__ = ['TapisModel']\n\n\nclass TapisModel(object):\n \"\"\"Base class for Tapis models\n \"\"\"\n\n SEARCH_ARGS = []\n service_id_type = 'Unknown'\n\n format_many = False\n payload = {}\n fields = []\n\n def add_field(self,\n param_name,\n param_type,\n 
only_detail,\n mods_allowed,\n default_mod,\n value_choices=None,\n param_opt=None,\n searchable=False):\n \"\"\"Add a searchable field\n \"\"\"\n arg = Argdef(param_name, param_type, only_detail, mods_allowed,\n default_mod, value_choices, param_opt, searchable)\n if arg not in self.fields:\n self.fields.append(arg)\n return self\n\n def add_fields(self, fields):\n \"\"\"Bulk add multiple searchable fields\n \"\"\"\n for f in fields:\n self.add_field(*f)\n return self\n\n def get_args(self, list_only=False):\n pass\n\n @classmethod\n def optionize(cls, text_string):\n \"\"\"Render a field name as an option\n \"\"\"\n return optionize(text_string)\n\n @classmethod\n def argify(cls, arg_name, arg_type, arg_help=None):\n pass\n\n def __init__(self):\n self.add_fields(self.SEARCH_ARGS)\n\n def get_headers(self, verbosity_level=None, formatter='table'):\n if verbosity_level is None:\n verbosity_level = Verbosity.LISTING\n headers = list()\n for f in self.fields:\n # print('{}: {}> = {}'.format(f, verbosity_level, f.verbosity))\n if verbosity_level >= f.verbosity:\n if argtype.format_allows_param_type(f, formatter):\n headers.append(f.param_name)\n return headers\n\n @classmethod\n def render_key_value(cls, key, value):\n \"\"\"Overridable function to how JSON key/values should be transformed\n \"\"\"\n return key, value\n\n @classmethod\n def transform_response(cls, response_json):\n \"\"\"Apply an intermediate transform to a JSON document\n \"\"\"\n transformed = dict()\n for k, v in response_json.items():\n k1, v1 = cls.render_key_value(k, v)\n transformed[k1] = v1\n return transformed\n","sub_path":"tapis_cli/commands/taccapis/__model.py","file_name":"__model.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"544962412","text":"# -*- coding: utf-8 -*-\n\nfrom rest_framework import serializers\n\nfrom admin_app.market.looks.models import Look\nfrom admin_app.market.looks.models import LookArea\nfrom .products import ProductSerializer\nfrom ..core import ContentSerializer\nfrom ..serializers import AlternativeSerializerMixin\nfrom ..serializers import DynamicFieldsModelSerializer\n\n\nclass LookAreaSerializer(\n AlternativeSerializerMixin,\n serializers.ModelSerializer\n):\n product = ProductSerializer(fields=ProductSerializer.Meta.list_fields)\n\n class Meta:\n model = LookArea\n fields = (\n 'id',\n 'x',\n 'y',\n 'width',\n 'height',\n 'product',\n )\n\n\nclass LookSerializer(DynamicFieldsModelSerializer, ContentSerializer):\n areas = LookAreaSerializer(many=True, read_only=True)\n\n class Meta:\n model = Look\n fields = ContentSerializer.Meta.fields + (\n 'gender',\n 'price',\n 'areas',\n )\n list_fields = (\n 'id',\n 'title',\n 'text',\n 'slug',\n 'main_image',\n 'gender',\n 'price',\n )\n alternative = {\n 'params': {\n 'read_only': True,\n 'lookup_field': 'slug',\n 'view_name': 'look-detail',\n }\n }\n","sub_path":"src/face_full/api_v1/serializers/market/looks.py","file_name":"looks.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"654364063","text":"import csv\nimport datetime\nimport math\nimport random\nimport sys\nimport argparse\nfrom collections import Counter\nimport imageio\nimport pickle\n\nimport numpy as np\nfrom fitness_utils.fitness import get_fitness, get_image_fitness, get_classifier_fitness\nfrom genetic_components.genetic_operators import (crossover, gen_rnd_expr,\n 
mutation,\n tournament_selection)\n\nexperiment_time = datetime.datetime.now()\nfunction_set = {\n 'abs',\n 'add',\n 'and', \n 'cos', \n 'div', \n 'exp', \n 'if',\n 'log',\n 'max',\n 'mdist',\n 'min',\n 'mod',\n 'mult',\n 'neg',\n 'or',\n 'pow',\n 'scalarT',\n 'scalarV',\n 'sign',\n 'sin',\n 'sqrt',\n 'sub',\n 'tan',\n 'warp',\n 'xor',\n}\nterminal_set = set() \n\nfor i in range(255):\n# terminal_set.add(i)\n terminal_set.add('x')\n terminal_set.add('y')\n\ndef initialize_population(population_size, fitness_func, image_size, image_to_fit):\n population = []\n for individual in range(population_size):\n depth_check = 0\n channel_trees = []\n for tree_number in range(4):\n while depth_check == 0:\n tree_size = int(random.random() * 4) + 1\n tree = gen_rnd_expr(function_set, terminal_set, tree_size, 'ramped half-and_half')\n depth_check = tree.get_depth()\n channel_trees.append(tree)\n individual_result = []\n red_tree = channel_trees[0]\n green_tree = channel_trees[1]\n blue_tree = channel_trees[2]\n alpha_tree = channel_trees[3]\n population.append({'channel_trees': channel_trees, 'fitness': fitness_func(x_size=image_size[0], y_size=image_size[1], red_tree=red_tree,green_tree=green_tree, blue_tree=blue_tree, alpha_tree=alpha_tree, current_individual=individual, current_generation=-1, image_to_fit=image_to_fit)})\n return population\n\n\ndef engine(population_size, generation_number, tournament_size, mutation_rate, crossover_rate, image_size, seed, image_to_fit=None, resume_file=None):\n engine_state = {\n 'population_size': population_size, \n 'generation_number': generation_number,\n 'tournament_size': tournament_size,\n 'mutation_rate': mutation_rate,\n 'crossover_rate': crossover_rate,\n 'image_size': image_size,\n 'seed': seed,\n 'image_to_fit': image_to_fit,\n 'population': [],\n 'current_generation': 1,\n }\n fitness_func = None\n lines = []\n lines.append(['seed', 'gen_number', 'best_fitness', 'best_individual', 'biggest_tree_depth', 'best_red', 'best_green', 'best_blue', 'best_alpha'])\n current_generation = 1\n if image_to_fit is None:\n fitness_func = get_classifier_fitness\n else:\n fitness_func = get_image_fitness\n if resume_file == None:\n population = initialize_population(population_size, fitness_func, image_size, image_to_fit)\n else:\n with open(resume_file, 'rb') as dump_file:\n engine_state = pickle.load(dump_file)\n current_generation = engine_state['current_generation']\n population = engine_state['population']\n print(\"Finished Generating\")\n best = {'fitness': float('inf')}\n try:\n while current_generation < generation_number:\n engine_state['population'] = population\n engine_state['current_generation'] = current_generation\n new_population = []\n new_population.append(best)\n max_tree_depth = 0\n if current_generation % 100 == 0:\n immigrants = initialize_population(population_size, fitness_func, image_size, image_to_fit)\n population.extend(immigrants)\n foo = random.sample(population, population_size)\n population = foo\n for current_individual in range(population_size - 1):\n individual_result = []\n child = [0,0,0,0] \n max_child_depth = 0\n for current_tree in range(4):\n member_depth = float('inf')\n while member_depth > 17:\n if random.random() < crossover_rate:\n parent_1 = tournament_selection(tournament_size, population)\n parent_2 = tournament_selection(tournament_size, population)\n child[current_tree] = crossover(parent_1['channel_trees'][current_tree], parent_2['channel_trees'][current_tree])\n elif random.random() < crossover_rate + 
mutation_rate:\n parent = tournament_selection(tournament_size, population)\n child[current_tree] = mutation(parent['channel_trees'][current_tree], function_set=function_set, terminal_set=terminal_set)\n else:\n parent = tournament_selection(tournament_size, population)\n child[current_tree] = parent['channel_trees'][current_tree]\n member_depth = child[current_tree].get_depth() \n tree_string = child[current_tree].get_string()\n if member_depth > max_child_depth:\n max_child_depth = member_depth \n new_member = {}\n new_member = {'channel_trees': child, 'fitness': fitness_func(red_tree=child[0],green_tree=child[1], blue_tree=child[2], alpha_tree=child[3], current_individual=current_individual, current_generation=current_generation, x_size= image_size[0], y_size= image_size[1], best_fit=best['fitness'], image_to_fit=image_to_fit), 'depth': max_child_depth}\n if new_member['fitness'] < best['fitness']:\n best = new_member\n best['result'] = individual_result\n if max_tree_depth < max_child_depth:\n max_tree_depth = max_child_depth\n new_population.append(new_member)\n lines.append([str(seed), str(current_generation), str(best['fitness']), best['depth'], max_tree_depth, best['channel_trees'][0].get_string(),best['channel_trees'][1].get_string(),best['channel_trees'][2].get_string(),best['channel_trees'][3].get_string()])\n print(\"###SEED \" + str(seed) + \" GENERATION \" + str(current_generation) + \" REPORT###\")\n print(\"BEST DEPTH: \" + str(best['depth']))\n print(\"BEST FITNESS: \" + str(best['fitness']))\n print(\"MAX DEPTH: \" + str(max_tree_depth))\n print(\"BEST STRINGS: \\n\\t\" + best['channel_trees'][0].get_string() + '\\n\\t' + best['channel_trees'][1].get_string() + '\\n\\t' + best['channel_trees'][2].get_string() + '\\n\\t' + best['channel_trees'][3].get_string())\n population = new_population\n with open('logs/' + str(experiment_time) + '_fitness_results.csv', 'a') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerows(lines)\n lines = []\n current_generation += 1\n except Exception as e:\n print('Exception: '+ str(e))\n #with open('dumps/' + str(experiment_time) + '_dumps', 'ab') as dump_file:\n with open('dumps/latest_dump', 'ab') as dump_file:\n pickle.dump(engine_state, dump_file)\n print(\"Saved state!\")\n return True\n \ndef main():\n engine(100, 100, 3, 0.2, 0.9, [1024,1024], 0)\n\nif __name__ == \"__main__\":\n \"\"\" Main function worker \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Evolutionary Algorithm for Image Generation\")\n parser.add_argument(\n dest=\"population_size\",\n )\n parser.add_argument(\n dest=\"generation_number\",\n )\n parser.add_argument(\n dest=\"tournament_size\")\n parser.add_argument(\n dest=\"mutation_rate\")\n parser.add_argument(\n dest=\"crossover_rate\")\n parser.add_argument(\n help=\"Example of the expected format 256x256\",\n dest=\"image_size\")\n parser.add_argument(\n dest=\"seed\")\n\n args = parser.parse_args()\n\n random.seed(int(args.seed))\n image_resolution = args.image_size.split('x')\n engine(\n int(args.population_size),\n int(args.generation_number),\n int(args.tournament_size),\n float(args.mutation_rate),\n float(args.crossover_rate),\n [int(image_resolution[0]), int(image_resolution[1])],\n int(args.seed)\n )\n","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":8470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"646639163","text":"from flask import Blueprint\nfrom flask import render_template, url_for, 
redirect, flash, request, abort, session,\\\n Response, current_app, send_from_directory\nfrom whatSticksWebApp import db, bcrypt, mail\nfrom whatSticksWebApp.models import User, Post, Health_description, Health_measure\nfrom flask_login import login_user, current_user, logout_user, login_required\nimport secrets\nimport os\nfrom datetime import datetime, date, time, timedelta\nimport datetime\nfrom sqlalchemy import func, desc\nimport pandas as pd\nimport json\nimport zipfile\nfrom whatSticksWebApp.main.utils import json_dict_to_dfs, plot_text_format, chart_scripts, get_user_tz_util\nfrom bokeh.plotting import figure, output_file\nfrom bokeh.embed import components\nfrom bokeh.resources import CDN\nfrom bokeh.io import curdoc\nfrom bokeh.themes import built_in_themes\nfrom bokeh.models import ColumnDataSource, Grid, LinearAxis, Plot, Text\nimport pytz\nimport zoneinfo\nfrom pytz import timezone\nimport time\n\nmain = Blueprint('main', __name__)\n\n# @main.route('/get_post_json', methods=['POST'])\n# def get_post_json(): \n # data = request.get_json()\n # data2=request.args\n # print('here',data2, data)\n # return jsonify(status=\"success\", data=data)\n\n@main.route(\"/dashboard\", methods=[\"GET\",\"POST\"])\n@login_required\ndef dashboard():\n\n user_tz = get_user_tz_util()\n default_date=datetime.datetime.now().astimezone(user_tz).strftime(\"%Y-%m-%d\")\n default_time=datetime.datetime.now().astimezone(user_tz).strftime(\"%H:%M\")\n \n #filter on user data only\n base_query_health_description=db.session.query(Health_description).filter(Health_description.user_id==1)#1 is OK it get's replaced\n \n if current_user.id==2:\n df_health_description=pd.read_sql(str(base_query_health_description)[:-1]+str(1),db.session.bind)\n else:\n df_health_description=pd.read_sql(str(base_query_health_description)[:-1]+str(current_user.id),db.session.bind)\n \n if len(df_health_description)>0:\n script1, div1=chart_scripts(df_health_description)\n cdn_js=CDN.js_files\n cdn_css=CDN.css_files\n else:\n div1=None;script1=None;cdn_js=None;cdn_css=None\n\n #Timle line table\n column_names=['ID','Date and Time','Type of Activity','Cardio Performance','Duration (seconds)','Weight']\n \n df_sub=df_health_description[['id', 'datetime_of_activity', 'var_activity','metric1_carido',\n 'metric2_session_duration','metric3']].copy()\n df_sub.datetime_of_activity=df_sub['datetime_of_activity'].astype('datetime64[ns]')\n df_sub.datetime_of_activity=pd.to_datetime(df_sub[\"datetime_of_activity\"].dt.strftime('%m/%d/%Y %H:%M'))\n df_sub.metric1_carido=df_sub.metric1_carido.round(2)\n df_sub.metric2_session_duration=df_sub.metric2_session_duration.astype('Int64')\n df_sub.metric2_session_duration=df_sub.metric2_session_duration.apply('{:,}'.format)\n df_sub.metric2_session_duration=df_sub.metric2_session_duration.str.replace('','')\n df_sub=df_sub.where(pd.notnull(df_sub), '')\n df_sub=df_sub.sort_values(by=['datetime_of_activity'],ascending=False)\n table_lists=df_sub.values.tolist()\n \n \n if len(table_lists)==0:\n no_hits_flag=True\n else:\n no_hits_flag=False\n\n if request.method == 'POST':\n formDict = request.form.to_dict()\n print('formDict::::',formDict)\n if formDict.get('submit_activity'):\n\n activity_date=formDict.get('activity_date')\n activity_time=formDict.get('activity_time')\n\n # activity_date_weight=formDict.get('activity_date_weight')\n # activity_time_weight=formDict.get('activity_time_weight')\n \n var_activity=formDict.get('var_activity')\n activity_notes=formDict.get('activity_notes')\n 
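# metric3 carries the optional body-weight value submitted with the form\n            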
metric3=formDict.get('metric3_weight')\n \n return redirect(url_for('main.add_activity', activity_date=activity_date,activity_time=activity_time,\n # activity_date_weight=activity_date_weight,activity_time_weight=activity_time_weight,\n metric3=metric3,var_activity=var_activity, activity_notes=activity_notes))\n\n \n elif formDict.get('submit_upload_health')=='True':\n return redirect(url_for('main.upload_health_data'))\n \n elif formDict.get('delete_record_id'):\n delete_record_id=formDict.get('delete_record_id')\n return redirect(url_for('main.delete_record', delete_record_id=delete_record_id))\n\n \n return render_template('dashboard.html', div1=div1, script1=script1, cdn_js=cdn_js, cdn_css=cdn_css,\n default_date=default_date, default_time=default_time, table_data=table_lists, no_hits_flag=no_hits_flag,\n len=len,column_names=column_names)\n\n\n@main.route(\"/delete_record\",methods=[\"GET\",\"POST\"])\n@login_required\ndef delete_record():\n delete_record_id=request.args.get('delete_record_id')\n print('delete_record_id:::',delete_record_id)\n db.session.query(Health_description).filter(Health_description.id==delete_record_id).delete()\n db.session.query(Health_measure).filter(Health_measure.description_id==delete_record_id).delete()\n db.session.commit()\n return redirect(url_for('main.dashboard'))\n\n@main.route(\"/add_activity\",methods=[\"GET\",\"POST\"])\n@login_required\ndef add_activity():\n print('add_activity--requests:::',request.args)\n \n user_tz = get_user_tz_util()\n \n #convert this date time to utc\n date_time_obj_unaware = datetime.datetime.strptime(request.args.get('activity_date')+request.args.get('activity_time'), '%Y-%m-%d%H:%M')\n date_time_obj_aware=user_tz.localize(date_time_obj_unaware)\n timezone_offset = date_time_obj_aware.utcoffset().total_seconds()/60\n\n timezone_offset=request.args.get('timezone_offset')\n weight=request.args.get('metric3')\n var_activity=request.args.get('var_activity')\n activity_notes=request.args.get('activity_notes')\n\n \n # var_timezone_utc_delta_in_mins get this by using the: cur_zone_time.utcoffset().total_seconds()/60\n if weight:\n # print('if weight.....', weight)\n update_activity=Health_description(datetime_of_activity=date_time_obj_aware,var_type='Weight',var_activity='Weight',\n var_timezone_utc_delta_in_mins=timezone_offset, user_id=current_user.id,source_filename='web application',\n metric3=weight)\n # print('update_activity::::', update_activity)\n elif not activity_notes:\n # print('not activity_notes')\n update_activity=Health_description(datetime_of_activity=date_time_obj_aware,var_type='Activity',\n var_timezone_utc_delta_in_mins=timezone_offset, user_id=current_user.id,source_filename='web application',\n var_activity=var_activity)\n else:\n update_activity=Health_description(datetime_of_activity=date_time_obj_aware,var_type='Activity',\n var_timezone_utc_delta_in_mins=timezone_offset, user_id=current_user.id,source_filename='web application',\n var_activity=var_activity, note=activity_notes)\n db.session.add(update_activity)\n db.session.commit()\n return redirect(url_for('main.dashboard'))\n # return render_template('dashboard.html', div1=div1, script1=script1, cdn_js=cdn_js, cdn_css=cdn_css,\n # default_date=default_date, default_time=default_time)\n\n\n\n@main.route(\"/upload health data\", methods=[\"GET\",\"POST\"])\n@login_required\ndef upload_health_data():\n\n if request.method == 'POST':\n print('POST method')\n formDict = request.form.to_dict()\n filesDict = request.files.to_dict()\n 
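# debug output: dump the posted form fields and any attached files\n        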
print('formDict:::', formDict)\n print('filesDict:::', filesDict)\n if formDict.get('upload_file_button'):\n # print(dir(filesDict.get('uploaded_file')))\n # print('filename:::',filesDict.get('uploaded_file').filename)\n if filesDict.get('uploaded_file').filename=='':\n flash(f'File not selected', 'warning')\n return redirect(url_for('main.upload_health_data'))\n \n print('upload button pressed')\n #save file \n uploaded_file = request.files['uploaded_file']\n current_files_dir=os.path.join(current_app.config['UPLOADED_FILES_FOLDER'])\n uploaded_file.save(os.path.join(current_files_dir,uploaded_file.filename))\n \n #TODO: polar upload should be a utility of its own. Code should\n #look in json files and pull heart rate by second, distance and speed.\n #right now ***too much hard coded stuff in json_dict_to_df_dict***\n \n #get files to json dict\n polar_zip=zipfile.ZipFile(os.path.join(current_app.config[\n 'UPLOADED_FILES_FOLDER'], uploaded_file.filename))\n \n polar_data_dict={}\n for i in polar_zip.filelist:\n polar_data_dict[i.filename]=json.loads(polar_zip.read(i.filename))\n \n #get files to df dict\n df_description,df_measure=json_dict_to_dfs(polar_data_dict)\n session_count=len(df_description)\n \n #put data into tables\n df_description.to_sql('health_description',db.engine, if_exists='append',index=False)\n df_measure.to_sql('health_measure',db.engine, if_exists='append',index=False)\n \n flash(f'Files uploaded ' + str(session_count) +' new sessions', 'success')\n return redirect(url_for('main.upload_health_data'))\n \n \n \n \n return render_template('upload_health_data.html')\n \n \n \n \n \n \n ","sub_path":"whatSticksWebApp/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"15167230","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Employee',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('classification', models.CharField(default=b'Attorney', max_length=10, choices=[(b'Support', b'Support Staff'), (b'Attorney', b'Attorney')])),\n ('default_Bill_Rate', models.SmallIntegerField()),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"employees/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"304323548","text":"#!/usr/bin/env python\n\nu\"\"\".\nsplit your dataset into train and val\n\"\"\"\n\nimport os\nimport glob\nimport random\nimport sys\nsys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\nimport cv2\n\nt_img_list = glob.glob(\"t/*.jpg\")\nf_img_list = glob.glob(\"f/*.jpg\")\n\n\nos.makedirs(\"splited_data\", exist_ok=True)\nos.makedirs(\"splited_data/train/t\", exist_ok=True)\nos.makedirs(\"splited_data/train/f\", exist_ok=True)\nos.makedirs(\"splited_data/val/t\", exist_ok=True)\nos.makedirs(\"splited_data/val/f\", exist_ok=True)\n\n\nfor filepath in t_img_list:\n img_name = os.path.basename(filepath)\n print(img_name)\n img = cv2.imread(filepath)\n 
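# one uniform draw per image: > 0.1 goes to train, otherwise val (~90/10 split)\n    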
rand = random.random()\n if(rand > 0.1):\n cv2.imwrite(\"splited_data/train/t/\" + img_name, img)\n else:\n cv2.imwrite(\"splited_data/val/t/\" + img_name, img)\n\nfor filepath in f_img_list:\n img_name = os.path.basename(filepath)\n print(img_name)\n img = cv2.imread(filepath)\n rand = random.random()\n if(rand > 0.1):\n cv2.imwrite(\"splited_data/train/f/\" + img_name, img)\n else:\n cv2.imwrite(\"splited_data/val/f/\" + img_name, img)\n","sub_path":"split_dataset.py","file_name":"split_dataset.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"395351489","text":"import re\n\nwith open(\"6.txt\", 'r') as f:\n insts = f.readlines()\n\ngrid = [[0]*1000 for i in range(1000)]\n\nfor inst in insts:\n boundaries = re.findall(\"\\d+\", inst)\n x, y, X, Y = [int(boundary) for boundary in boundaries]\n for j in range(y, Y+1):\n for i in range(x, X+1):\n if \"on\" in inst:\n grid[j][i] = 1\n elif \"off\" in inst:\n grid[j][i] = 0\n else:\n if grid[j][i]:\n grid[j][i] = 0\n else:\n grid[j][i] = 1\n\nprint(sum(sum(row) for row in grid))\n","sub_path":"2015/06/6-1.py","file_name":"6-1.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"528339232","text":"n = int(input())\r\nvo = []\r\nlas = ''\r\nd = {}\r\nfor i in range(n):\r\n n = input()\r\n try:\r\n b = d[n]\r\n v = 2\r\n if i%2==0:\r\n v = 1\r\n vo.append(v)\r\n except:\r\n if las !='' and las[-1]!=n[0]:\r\n v = 2\r\n if i%2==0:\r\n v = 1\r\n vo.append(v)\r\n d[n] = 1\r\n las = n\r\nif vo==[]:\r\n print('Fair Game')\r\nelse:\r\n print(f'Player {vo[0]} lost')","sub_path":"shiritori.py","file_name":"shiritori.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"258468606","text":"from Particle import *\nimport pygame as pg\nfrom Constants import *\n\ndef render():\n\n pg.init()\n window = pg.display.set_mode((WIDTH, HEIGHT))\n\n particles = []\n keys = {\n pg.K_KP_MINUS: False,\n pg.K_KP_PLUS: False,\n pg.K_ESCAPE: False\n }\n\n for i in range(0, PARTICULATES):\n particles.append(Particle())\n\n zoom = 1.0\n\n while True:\n pg.display.flip()\n window.fill((0, 0, 0))\n window.lock()\n for particle in particles:\n if not particle._merged:\n # for the non-merged particles, draw a circle based on their radii\n # considering the zoom factor\n pg.draw.circle(window, (255, 255, 255),\n (int(HWIDTH + zoom * HWIDTH * (particle._position[0] - HWIDTH) / HWIDTH),\n int(HHEIGHT + zoom * HHEIGHT * (particle._position[1] - HHEIGHT) / HHEIGHT)),\n int(particle._radius * zoom), 0)\n window.unlock()\n while True:\n # This block updates the state of whether a key has been pressed\n event = pg.event.poll()\n if event.type == pg.NOEVENT:\n break\n elif event.type in [pg.KEYDOWN, pg.KEYUP]:\n keys[event.key] = event.type == pg.KEYDOWN\n\n # Update the positions and speeds of the particles\n for p1 in particles:\n if p1._merged:\n continue\n p1._resetAcceleration()\n for p2 in particles:\n if p1 is p2 or p2._merged:\n continue\n p1._updateAcceleration(p2)\n p1._updatePosition()\n\n # Conservation of total momentum; merge the particles that touch\n # using elastic collisions\n for p1 in particles:\n if p1._merged:\n continue\n for p2 in particles:\n if p1 is p2 or p2._merged:\n continue\n if Particle._contact(p1, p2):\n if p1._mass < p2._mass:\n p1, p2 = p2, p1\n p2._merged = True\n\n 
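# inelastic merge: the heavier particle absorbs the lighter one's mass, then its radius and velocity are recomputed\n                    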
p1._mass += p2._mass\n p1._setRadius()\n p1._newVelocity(p1, p2)\n\n if keys[pg.K_KP_PLUS]:\n zoom += 0.1\n if keys[pg.K_KP_MINUS]:\n zoom -= 0.1\n if keys[pg.K_ESCAPE]:\n break\n if event.type == pg.NOEVENT:\n pg.time.wait(10)","sub_path":"src/Render.py","file_name":"Render.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"165457797","text":"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n# -*- coding: utf-8 -*-\nimport os.path as op\nfrom nipype.interfaces.base import CommandLine, traits, TraitedSpec, File, Directory, CommandLineInputSpec, Undefined\nfrom nipype.interfaces.mrtrix3.base import MRTrix3BaseInputSpec, MRTrix3Base\n\nclass fod2fixelInputSpec(MRTrix3BaseInputSpec):\n in_file = File(\n exists=True, argstr=\"%s\", position=-5, mandatory=True, desc=\"input dwi image\"\n )\n #out_file is a folder\n out_file = Directory(argstr=\"%s\", usedefault=True, mandatory=True, position=-4, desc=\"output folder\")\n fmls_peak_value = traits.Float(\n argstr=\"-fmls_peak_value %d\", desc=\"any lobe with a maximal peak amplitude smaller than this threshold will be discarded\"\n )\n \n fmls_integral = traits.Float(\n argstr=\"-fmls_integral %f\", desc=\"any lobe with an integral smaller than this threshold will be discarded\"\n )\n afd_file = File(\n usedefault=True, argstr=\"-afd %s\", position=-3, \n desc=\"output the total Apparent Fibre Density per fixel (integral of FOD lobe)\"\n )\n peak_file = File(\n usedefault=True, argstr=\"-peak_amp %s\", position=-2, \n desc=\"output the amplitude of the FOD at the maximal peak per fixel\"\n )\n disp_file = File(\n usedefault=True, argstr=\"-disp %s\", position=-1, \n desc=\"output a measure of dispersion per fixel as the ratio between FOD lobe integral and maximal peak amplitude\"\n )\n\nclass fod2fixelOutputSpec(TraitedSpec):\n out_file = File(argstr=\"%s\", desc=\"output image\")\n afd_file = File(argstr=\"-afd %s\", desc=\"output AFD file\")\n peak_file = File(argstr=\"-peak_amp %s\", desc=\"output peak amplitude file\")\n disp_file = File(argstr=\"-disp %s\", desc=\"output fixel dispersion file\")\n\nclass fod2fixel(MRTrix3Base):\n \"\"\"\n Perform segmentation of continuous Fibre Orientation Distributions \n (FODs) to produce discrete fixels\n Example\n -------\n >>> fod2Fixel.inputs.in_file = 'wmfod.mif' \n >>> fod2Fixel.inputs.fmls_peak_value = 0\n >>> fod2Fixel.inputs.fmls_integral = 0.1\n >>> fod2fixel.cmdline \n 'fod2Fixel wmfod.mif -fmls_peak_value 0 -fmls_integral 0.1 -afd afd,mif -peak_amp peak.mif -disp disp.mif\n >>> fod2Fixel.run() \n \"\"\"\n _cmd = \"fod2fixel\"\n input_spec = fod2fixelInputSpec\n output_spec = fod2fixelOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = op.abspath(self.inputs.out_file)\n if self.inputs.afd_file != Undefined:\n outputs[\"afd_file \"] = op.abspath(self.inputs.afd_file )\n if self.inputs.peak_file != Undefined:\n outputs[\"peak_file\"] = op.abspath(self.inputs.peak_file)\n if self.inputs.disp_file != Undefined:\n outputs[\"disp_file\"] = op.abspath(self.inputs.disp_file)\n return outputs \n\n ","sub_path":"fod2fixel_function.py","file_name":"fod2fixel_function.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"282562252","text":"from django.urls import path\nfrom . 
import views\nfrom django.conf.urls import url\n\nurlpatterns = [\n    path(\"\",views.home, name=\"home\"),\n    path(\"home\",views.home, name=\"home\"),\n    path(\"vehicles//\",views.vehicles, name=\"vehicles\"),\n    path(\"create_rent//\", views.create_rent, name='create_rent'),\n    path(\"rent_details\",views.rent_details,name='rent_details'),\n    path(\"rented_successfully\",views.rented_successfully,name='rented_successfully'),\n    path(\"admin_dash\",views.admin_dash,name='admin_dash'),\n    path(\"add_cars\",views.add_cars,name='add_cars'),\n    path(\"add_staff\",views.add_staff,name='add_staff'),\n    path(\"delete_item///\",views.delete_item,name='delete_item'),\n    path(\"edit_cars//\",views.edit_cars,name='edit_cars'),\n    path(\"edit_rent//\",views.edit_rent,name='edit_rent'),\n]\n","sub_path":"RENTaCAR/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"630533687","text":"from django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom django.contrib.auth import authenticate, login, logout, get_user\nfrom django.contrib.auth.models import User, AnonymousUser\nfrom .forms import *\nfrom .models import *\nfrom .vision_api import upload as api_upload, get_url, get_blob_metadata\nfrom django.conf import settings\n\n\ndef validate_username(request):\n    username = request.GET[\"username\"]\n    data = {\n        \"is_taken\": User.objects.filter(username__iexact=username).exists()\n    }\n    return JsonResponse(data)\n\n\ndef validate_email(request):\n    email = request.GET['email']\n    email = email.replace('%40', '@')\n    data = {\n        \"is_taken\": User.objects.filter(email__iexact=email).exists()\n    }\n    return JsonResponse(data)\n\n\ndef test_view(request):\n    context = {}\n    if request.method == 'POST':\n        form = UploadForm(request.POST, request.FILES)\n        import uuid\n        if form.is_valid():\n            context['original_file_name'] = request.FILES['fileInput'].name\n            context['file_type'] = request.FILES['fileInput'].name.split('.')[-1]\n            context['uploaded_file_name'] = str(uuid.uuid4()) + '.' 
+ context['file_type']\n context['username'] = get_user(request)\n context['input'] = UploadForm()\n return render(request, 'test.html', context=context)\n\n\ndef view_img(request, image_id):\n url = ''\n if request.method == 'GET':\n images = Image.objects.filter(user_id=request.user, image_name=image_id)\n for image in images:\n if image.url != '':\n url = image.url\n else:\n url = get_url(str(image.image_name))\n return render(request, 'view.html', context={'url': url})\n\n\ndef load_home(request):\n\n def get_urls(images):\n _urls = {}\n for _image in images:\n _name = str(_image.image_name)\n if _image.url == '':\n _image.url = get_url(_name)\n _image.save()\n if _image.url != '':\n _urls[_name] = _image.url\n return _urls\n\n search = request.GET['search'].split(' ') if 'search' in request.GET else None\n user_image = Image.objects.filter(user_id=request.user)\n show = []\n for image in user_image:\n name = str(image.image_name)\n if search is not None:\n if image.tags == '':\n image.tags = get_blob_metadata(name)\n image.save()\n for s in search:\n if s in image.tags.split(','):\n show.append(image)\n else:\n show.append(image)\n return get_urls(show)\n\n\ndef home_view(request):\n context = {}\n if not request.user.is_authenticated:\n return redirect(\"/login/\")\n if request.method == 'POST':\n form = UploadForm(request.POST, request.FILES)\n if form.is_valid():\n file_name = api_upload(request, settings.__getattr__(\"FMG\"))\n Image(user_id=request.user, image_name=file_name).save()\n else:\n print('Form not valid')\n context['urls'] = load_home(request)\n return render(request, 'home.html', context=context)\n\n\ndef login_view(request):\n context = {\"form\": LogInForm()}\n if request.method == 'GET':\n if type(get_user(request)) is not AnonymousUser:\n return redirect(\"home\")\n return render(request, 'login.html', context=context)\n else:\n username = request.POST['usernameInput']\n password = request.POST['passInput']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user, backend=False)\n return redirect(\"home\")\n else:\n return redirect(\"/login/\")\n\n\ndef sign_in_view(request):\n context = {\"form\": SignInForm()}\n if request.method == \"GET\":\n return render(request, 'sign-in.html', context)\n else:\n password = request.POST['passInput']\n username = request.POST['usernameInput']\n email = request.POST['mailInput']\n\n if username is not None:\n if not User.objects.filter(username=username).exists():\n user_new = User.objects.create_user(username=username, password=password, email=email)\n user_new.save()\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user, backend=False)\n return redirect(\"home\")\n else:\n pass\n return render(request, 'sign-in.html', context)\n\n\ndef user_logout(request):\n logout(request)\n return redirect(\"home\")\n\n\ndef upload(request):\n return render(request, 'upload.html', {'form': UploadForm()})\n","sub_path":"ProgettoCloud/Main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"537985766","text":"# -*- coding: utf-8 -*-\n\"\"\"\n------------------------------------------------- \nFile Name: PCA_LDA \nDescription : \nAuthor : ml \ndate: 2018/7/27\n------------------------------------------------- \nChange Activity: 
\n\t\t\t\t2018/7/27:\n-------------------------------------------------\n\"\"\"\n__author__ = 'ml'\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn import datasets\n\n# load the dataset\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\ntarget_names = iris.target_names\npca = PCA(n_components=2)\nX_r = pca.fit(X).transform(X)\nlda = LinearDiscriminantAnalysis(n_components=2)\nX_r2 = lda.fit(X,y).transform(X)\n# plot PCA\nplt.figure()\ncolors = ['navy','turquoise','darkorange']\nlw = 2\nfor color,i,target_name in zip(colors,[0,1,2],target_names):\n    plt.scatter(X_r[y == i,0],X_r[y == i,1],color=color,alpha=0.8,lw=lw,label=target_name)\nplt.legend(loc='best',shadow=False,scatterpoints=1)\nplt.title('PCA of IRIS data')\n# plot LDA\nplt.figure()\ncolors = ['navy','turquoise','darkorange']\nlw = 2\nfor color,i,target_name in zip(colors,[0,1,2],target_names):\n    plt.scatter(X_r2[y == i,0],X_r2[y == i,1],color=color,alpha=0.8,lw=lw,label=target_name)\nplt.legend(loc='best',shadow=False,scatterpoints=1)\nplt.title('LDA of IRIS data')\n\nplt.show()","sub_path":"ml_algorithm/PCA_LDA.py","file_name":"PCA_LDA.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"596871766","text":"print('-------------------------------------------------------')\ntry:\n    file1 = input('Enter file name: ')    # input file name to be worked on\n    opfile = open(file1)    # opening the file\n\n    for line in opfile:    # reading each line in the file\n        line = line.upper()     # converting all the characters in the file to uppercase\n        print(line)\nexcept:\n    print('File ', file1, 'does not exist')\n","sub_path":"src/chapter7/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"319674278","text":"import os\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nDEBUG_PROPAGATE_EXCEPTIONS = DEBUG\n \nDATABASE_ENGINE = 'sqlite3'\nDATABASE_NAME = ':memory:'\n\nTIME_ZONE = 'UTC'\n \nSITE_ID = 1\n \nSECRET_KEY = '00000000000000000000000000000000000000000000000000'\n\nROOT_URLCONF = 'example.urls'\n\nTEMPLATE_DIRS = (\n    os.path.join(os.path.abspath(os.path.dirname(__file__)), 'templates')\n)\n\nINSTALLED_APPS = (\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.sites',\n    'django.contrib.admin',\n    'usergroups',\n    'example.groups',\n)\n","sub_path":"example/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"136311529","text":"from django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.core.urlresolvers import reverse\n\nfrom django_tables2.tables import Table\nfrom suit.templatetags.suit_menu import Menu\n\nfrom motius_django.admin import motius_site\nfrom motius_frontend.forms import ContactForm\n\nregister = template.Library()\n\n\n@register.assignment_tag(takes_context=True)\ndef get_menu_motius(context, request):\n    \"\"\"\n    :type request: WSGIRequest\n    \"\"\"\n    if not isinstance(request, WSGIRequest):\n        return None\n\n    # Try to get app list\n    template_response = motius_site.index(request)\n    try:\n        app_list = 
template_response.context_data['app_list']\n except Exception:\n return\n\n return Menu(context, request, app_list).get_app_list()\n\n\n@register.inclusion_tag('motius_frontend/partials/contact.html', takes_context=True)\ndef contact_form(context):\n request = context['request']\n initial = {\n 'next': request.get_full_path(),\n 'name': request.user.get_full_name() if request.user.is_authenticated() else '',\n 'email': request.user.email if request.user.is_authenticated() else '',\n 'phone': request.user.client_profile.phone if hasattr(request.user, 'client_profile') else '',\n }\n\n if 'contact' in request.POST:\n form = ContactForm(request.POST)\n else:\n form = ContactForm(initial=initial)\n\n return {'form': form}\n\n\n@register.filter\ndef verbose_name(object):\n \"\"\"\n Returns verbose names of objects\n\n :param object:\n :return:\n \"\"\"\n if isinstance(object, Table):\n return object._meta.model._meta.verbose_name\n return object._meta.verbose_name\n\n\n@register.filter\ndef admin_url(object):\n content_type = ContentType \\\n .objects \\\n .get_for_model(object.__class__)\n return reverse(\"admin:%s_%s_change\" % (\n content_type.app_label,\n content_type.model),\n args=(object.id,))\n\n@register.filter\ndef content_type_id(name):\n return ContentType.objects.get(model=name).pk","sub_path":"motius_frontend/templatetags/motius.py","file_name":"motius.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"397289571","text":"from django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.views import View\nfrom django.views.generic import TemplateView\nfrom django.utils.encoding import force_text\nfrom account.models import User\nfrom .forms import SignupForm\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.template import Template, Context\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.utils.encoding import force_bytes\nfrom django.core.mail import EmailMultiAlternatives\nfrom .tokens import account_activation_token\n\n\nclass SignUpView(View):\n template_name = 'signup.html'\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(reverse('home'))\n form = SignupForm()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request, *args, **kwargs):\n form = SignupForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.email_confirmation = False\n user.save()\n host = request.get_host()\n send_register_email(user, host)\n return redirect('signup_confirmation')\n return render(request, self.template_name, {'form': form})\n\n\nclass SignUpConfirmationView(View):\n template_name = 'signup_confirmation.html'\n\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name)\n\n\nclass InvalidActivation(TemplateView):\n template_name = 'invaid_activation.html'\n\n\nclass SignUpActivationDone(TemplateView):\n template_name = 'signup_confirmation_done.html'\n\n\ndef send_register_email(user, host):\n try:\n subject, from_email, to = 'Email Confirmation', settings.EMAIL_HOST_USER, [user.email]\n html = render_to_string('signup_confirmation_email.html',\n {\n 'user': user,\n 'domain': host,\n 'uid': urlsafe_base64_encode(force_bytes(user.email)),\n 'token': account_activation_token.make_token(user)\n })\n content = 
Template(html).render(Context({}))\n        msg = EmailMultiAlternatives(subject, content, from_email, to)\n        msg.attach_alternative(content, \"text/html\")\n        msg.send()\n    except Exception as e:\n        pass\n\n\ndef activate(request, uidb64, token):\n    try:\n        u_email = force_text(urlsafe_base64_decode(uidb64))\n        user = User.objects.get(email=u_email)\n    except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n        user = None\n    if user is not None and account_activation_token.check_token(user, token):\n        user.is_active = True\n        user.save()\n        return redirect('signup_activation_done')\n    else:\n        return redirect(reverse('invalid_activate'))\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"76862806","text":"class HashTable:\n    def __init__(self, length):\n        self.table = [None] * length\n        self.n = 0\n    \n    def first_hash(self, key):\n        m = len(self.table)\n        return key % m\n    \n    def second_hash(self, home, i):\n        \"\"\"Quadratic probing eliminates primary clustering by\n        increasing the distance between each probe in the\n        sequence. In practice, quadratic probing typically\n        reduces the number of collisions but introduces\n        the problem of secondary clustering.\n        \"\"\"\n        m = len(self.table)\n        slot = (home + i**2) % m\n        return slot\n    \n    def search(self, key):\n        # Probe from the slot where the key was initially mapped; return the\n        # slot holding the key, the first empty slot, or None if the table is full\n        home = self.first_hash(key)\n        position = home\n        i = 0\n        while self.table[position] is not None:\n            if self.table[position] == key:\n                return position\n            i += 1\n            if i > len(self.table):\n                return None\n            position = self.second_hash(home, i)\n        return position\n    \n    def insert(self, key):\n        slot = self.search(key)\n        if slot is None:\n            return \"Slots are full\"\n        if self.table[slot] is not None:\n            return slot\n        self.table[slot] = key\n        self.n += 1\n        return self.table\n    \n    def delete(self, key):\n        slot = self.search(key)\n        if slot is not None and self.table[slot] == key:\n            self.table[slot] = -1  # tombstone marks a deleted slot\n            return self.table\n        return \"Not found\"\n    \n    def searchKey(self, key):\n        slot = self.search(key)\n        if slot is None or self.table[slot] != key:\n            return \"Not found\"\n        return \"found\"\n    \n\n# t = HashTable(13)\n# t.insert(765)\n# t.insert(431)\n# t.insert(96)\n# t.insert(142)\n# t.insert(579)\n# t.insert(226)\n# t.insert(903)\n# t.insert(388)\n\n\n\n","sub_path":"hashTable/quadratic_hashing/quadratic_probing.py","file_name":"quadratic_probing.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"78305373","text":"import ConfigParser\n\nclass profile_handler:\n\tpass\n\n\nconf = ConfigParser.ConfigParser()\nconf.read(\"./profiles.cfg\")\n\n\n#show all saved profiles from profiles.cfg\ndef profile_get_sections():\n\tfor sec in conf.sections():\n\t\tprint('\\033[93m' + sec + '\\033[0m' + \"\\n\\t\" + '\\033[95m' + conf.get(sec,\"description\")+\"\\n\\033[0m\")\t\t#'\\033[93m \\033[95m \\033[0m' are for coloring the text\n\n\n\n#add a new section to cfg file named: \"profile_\"\ndef profile_add_section(sec_name,sec_intervall,sec_frames,sec_description):\n\tconf.add_section(\"profile_\" + sec_name)\n\tconf.set(\"profile_\"+sec_name, \"intervall\" , sec_intervall)\n\tconf.set(\"profile_\"+sec_name, \"frames\" , sec_frames)\n\tconf.set(\"profile_\"+sec_name, \"description\" , sec_description)\n\twith open(\"./profiles.cfg\",\"wb\") as a:\n\t\tconf.write(a)\n\n\ndef profile_delete_section(sec_name):\n\tif 
conf.has_section(sec_name):\n\t\tconf.remove_section(sec_name)\n\t\twith open(\"./profiles.cfg\",\"wb\") as a:\n\t\t\tconf.write(a)\n\t\ta.close()\n\telse:\n\t\traise Exception(\"type an existing profile name: \")\n\n\n\n\n","sub_path":"profile_handler.py","file_name":"profile_handler.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"237108549","text":"import csv\nimport os.path\nimport xml.etree.ElementTree as ET\n\n\ndef main():\n base_path = os.path.join(os.path.dirname(__file__), 'files')\n xml_file = os.path.join(base_path, 'app7.xml')\n csv_file = os.path.join(base_path, 'import_result.csv')\n\n xml_root = ET.Element('Root')\n xml_tree = ET.ElementTree(xml_root)\n results = ET.SubElement(xml_root, 'Results')\n\n with open(csv_file) as fd:\n csv_fd = csv.reader(fd, delimiter=',')\n csv_headers = next(csv_fd)\n print(str.format('[*] csv headers: {}', csv_headers))\n\n for line in csv_fd:\n print(str.format('[*] line: {}', line))\n result = ET.SubElement(results, 'Result')\n for position, item in enumerate(line):\n tag = ET.SubElement(\n result,\n csv_headers[position]\n )\n tag.text = item\n\n xml_tree.write(\n xml_file,\n encoding='ISO-8859-1',\n xml_declaration=True,\n short_empty_elements=False,\n )\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"work/xml_processing/app6.py","file_name":"app6.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"56029222","text":"import telepot\nimport threading\nimport time\n\nimport Message\n\nclass Telegram(threading.Thread):\n def __init__(self, moana, sleepTime):\n threading.Thread.__init__(self)\n print(\"Create Telegram\")\n self.bot = None\n \n self.moana = moana\n \n self.stopFlag = False\n self.chat_id = \"\"\n \n self.messageBuffer = []\n\n self.sleepTime = sleepTime\n \n def connect(self, token):\n self.bot = telepot.Bot(token)\n self.bot.message_loop(self.requestHandle)\n \n def stop(self):\n print(\"Stop Telegram\")\n self.stopFlag = True\n \n def sendMessage(self, message):\n msg = \"\"\"\n [ {0} | {1} ]\n {2}\n \"\"\".format(message.serverName, message.time, message.content)\n \n self.bot.sendMessage(self.chat_id, msg)\n \n def requestHandle(self, msg):\n if self.chat_id == \"\":\n self.chat_id = msg['chat']['id']\n command = msg['text']\n print(\"User command: {0}\".format(command))\n\n reply = self.moana.handleTelegramRequest(command) \n self.sendMessage(reply)\n \n def setMessage(self, message):\n if self.chat_id == \"\":\n print(\"Chat_id is empty\")\n return\n msg = Message.Message()\n msg.setMessage(message.serverName, message.content)\n \n self.messageBuffer.append(msg)\n \n def run(self):\n try:\n while True:\n if self.stopFlag:\n break\n \n if self.messageBuffer:\n for msg in self.messageBuffer:\n self.sendMessage(msg)\n del self.messageBuffer[:]\n \n time.sleep(self.sleepTime)\n \n except KeyboardInterrupt:\n self.moana.stop()\n \n print(\"Telegram End\")","sub_path":"Telegram.py","file_name":"Telegram.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"176889545","text":"import tkinter as tk\r\nimport time\r\nimport random\r\nimport simpleaudio as sa\r\n\r\nScr = 0\r\nClickBool = False\r\ntimeStr = random.randint(100,5500)\r\nStar = False\r\nChecked = False\r\n\r\nwin = sa.WaveObject.from_wave_file(\"Click.wav\")\r\nlose = 
sa.WaveObject.from_wave_file(\"Death.wav\")\r\n\r\ndef Next():\r\n global Star\r\n global Checked\r\n global Scr\r\n if Checked == False:\r\n if Scr > 0:\r\n Scr -= 1\r\n lose.play()\r\n Scores.configure(text = \"Scores: {}\".format(Scr))\r\n buttonClick.configure(bg = \"white\",activebackground = \"white\",text = \"Wait..\")\r\n Star = False\r\n Checked = False\r\n buttonClick.after(random.randint(100,5500), CLICK)\r\n\r\ndef CLICK():\r\n global Star\r\n buttonClick.configure(bg = \"red\",activebackground = \"red\",text = \"Click!\")\r\n Star = True\r\n buttonClick.after(400, Next)\r\n\r\ndef Clicked():\r\n global Scr\r\n global Checked\r\n if Star == True:\r\n Checked = True\r\n Scr += 1\r\n win.play()\r\n Scores.configure(text = \"Scores: {}\".format(Scr))\r\n else:\r\n if Scr > 0:\r\n Scr -= 1\r\n lose.play()\r\n Scores.configure(text = \"Scores: {}\".format(Scr))\r\n\r\nroot = tk.Tk()\r\nroot.geometry(\"200x200+800+350\")\r\nroot.resizable(False,False)\r\nroot.title(\"Time Clicker\")\r\nroot.iconbitmap('mar.ico')\r\n\r\nframe1 = tk.Frame(root,bd = 20)\r\nframe2 = tk.Frame(root,bd = 20)\r\nframe1.pack(side='bottom')\r\nframe2.pack(side='bottom')\r\n\r\nbuttonClick = tk.Button(frame1,text = \"Wait..\", bg = \"white\",width=17,height=5,command = Clicked)\r\nbuttonClick.pack()\r\n\r\nScores = tk.Label(frame2,text = \"Scores: {}\".format(Scr))\r\nScores.pack()\r\n\r\nbuttonClick.after(timeStr,CLICK)\r\n\r\nroot.mainloop()\r\n","sub_path":"Click_Time.py","file_name":"Click_Time.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"361499842","text":"N, T = map(int, input().split())\nAB= []\nfor _ in range(N):\n a, b = map(int, input().split())\n AB.append((a, b))\nAB.sort(key=lambda x: x[1], reverse=True)\nAB.sort()\n# print(AB)\nA = [x[0] for x in AB]\nB = [x[1] for x in AB]\n\ndp = [[0] * (T + 3) for _ in range(N + 3)]\ndp[0][0] = 0\nfor i in range(N):\n for j in range(T):\n if A[i] <= j:\n dp[i + 1][j] = max(dp[i][j], dp[i][j - A[i]] + B[i])\n else:\n dp[i + 1][j] = max(dp[i + 1][j], dp[i][j])\n\n# print(dp)\nans = 0\nfor i in range(N):\n # print(dp[i][T - 1])\n ans = max(ans, dp[i][T - 1] + B[i])\nprint(ans)","sub_path":"Python_codes/p02863/s991290428.py","file_name":"s991290428.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"248734525","text":"from time import sleep\n\nfrom httpx import Client, TimeoutException\nfrom structlog import get_logger\n\nlog = get_logger()\n\nhttp_client = Client(timeout=30)\n\n\ndef fetch_json(url, params=None):\n for i in range(0, 5):\n try:\n response = http_client.get(url, params=params)\n if response.status_code == 200:\n return response.json()\n else:\n raise ValueError(\n f\"{response.status_code} error when calling {url}\"\n )\n except (TimeoutException) as e:\n log.exception(\n f\"Timed out when calling {url} with params {params}\", error=e\n )\n sleep(2 ** i)\n except Exception as e:\n log.exception(\n f\"Error when calling {url} with params {params}\", error=e\n )\n\n\ndef clean(input_string):\n return input_string.strip().lower().replace(\",\", \"\")\n\n\ndef clean_csv(input_string):\n return [clean(y) for y in str(input_string).split(\", \") if y != \"\"]\n","sub_path":"pipeline/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"518723475","text":"import json\nimport logging\n\nimport numpy as np\nimport qcelemental as qcel\n\nfrom . import frag\nfrom .exceptions import AlgError, OptError\nfrom . import v3d\nfrom .addIntcos import connectivityFromDistances, addCartesianIntcos\n\n\nclass Molsys(object):\n \"\"\" The molecular system consisting of a collection of fragments\n\n Parameters\n ----------\n fragments : list(Frag)\n fb_fragments : list\n NYI fixed body fragments\n intcos : list(Simple), optional\n\n \"\"\"\n def __init__(self, fragments, fb_fragments=None, intcos=None, multiplicity=1):\n # ordinary fragments with internal structure\n self.logger = logging.getLogger(__name__)\n self._fragments = []\n if fragments:\n self._fragments = fragments\n # fixed body fragments defined by Euler/rotation angles\n self._fb_fragments = []\n if fb_fragments:\n self._fb_fragments = fb_fragments\n self._multiplicity = multiplicity\n\n def __str__(self):\n s = ''\n for iF, F in enumerate(self._fragments):\n s += \"\\n\\tFragment %d\\n\" % (iF + 1)\n s += F.__str__()\n for iB, B in enumerate(self._fb_fragments):\n s += \"\\tFixed body Fragment %d\\n\" % (iB + 1)\n s += B.__str__()\n return s\n\n @classmethod\n def fromPsi4Molecule(cls, mol):\n \"\"\" Creates a optking molecular system from psi4 molsys\n\n Parameters\n ----------\n mol: object\n psi4 mol\n\n Returns\n -------\n cls :\n optking molecular system: list of fragments\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info(\"\\tGenerating molecular system for optimization from PSI4.\")\n\n NF = mol.nfragments()\n logger.info(\"\\t%d fragments in PSI4 molecule object.\" % NF)\n frags = []\n\n for iF in range(NF):\n fragMol = mol.extract_subsets(iF + 1)\n\n fragNatom = fragMol.natom()\n logger.info(\"\\tCreating fragment %d with %d atoms\" % (iF + 1, fragNatom))\n\n fragGeom = np.zeros((fragNatom, 3), float)\n fragGeom[:] = fragMol.geometry()\n\n fragZ = []\n for i in range(fragNatom):\n fragZ.append(int(fragMol.Z(i)))\n\n fragMasses = []\n for i in range(fragNatom):\n fragMasses.append(fragMol.mass(i))\n\n frags.append(frag.Frag(fragZ, fragGeom, fragMasses))\n\n m = mol.multiplicity()\n return cls(frags, multiplicity=m)\n\n @classmethod\n def from_JSON_molecule(cls, JSON_string):\n \"\"\" Creates optking molecular system from JSON input.\n\n Parameters\n ----------\n JSON_string : string\n Takes in a string of the molecule key from the QC JSON schema\n see http://molssi-qc-schema.readthedocs.io/en/latest/auto_topology.html\n\n Returns\n -------\n cls:\n molsys cls consists of list of Frags\n \"\"\"\n\n logger = logging.getLogger(__name__)\n logger.info(\"\\tGenerating molecular system for optimization from QC Schema.\\n\")\n molecule = json.loads(JSON_string)\n\n geom = np.asarray(molecule['geometry'])\n geom = geom.reshape(-1, 3)\n\n Z_list = [qcel.periodictable.to_Z(atom) for atom in molecule['symbols']]\n\n masses_list = molecule.get('masses')\n if masses_list is None:\n masses_list = [qcel.periodictable.to_mass(atom) for atom in molecule['symbols']]\n\n frags = []\n if 'fragments' in molecule:\n for iF in range(len(molecule['fragments'])):\n frag_geom = geom[iF[0]:iF[-1] + 1]\n frag_masses = masses_list[iF[0]:(iF[-1] + 1)]\n frag_Z_list = Z_list[iF[0]:(iF[-1] + 1)]\n frags.append(frag.Frag(frag_Z_list, frag_geom, frag_masses))\n else:\n frags.append(frag.Frag(Z_list, geom, masses_list))\n\n return cls(frags)\n\n @property\n def Natom(self):\n return sum(F.Natom for F in self._fragments)\n\n @property\n def multiplicity(self):\n return 
self._multiplicity\n\n @property\n def Nfragments(self):\n return len(self._fragments) + len(self._fb_fragments)\n\n # Return overall index of first atom in fragment, beginning 0,1,...\n def frag_1st_atom(self, iF):\n if iF >= len(self._fragments):\n return ValueError()\n start = 0\n for i in range(0, iF):\n start += self._fragments[i].Natom\n return start\n\n def frag_atom_range(self, iF):\n start = self.frag_1st_atom(iF)\n return range(start, start + self._fragments[iF].Natom)\n\n # accepts absolute atom index, returns fragment index\n def atom2frag_index(self, atom_index):\n for iF, F in enumerate(self._fragments):\n if atom_index in self.frag_atom_range(iF):\n return iF\n raise OptError(\"atom2frag_index: atom_index impossibly large\")\n\n # Given a list of atoms, return all the fragments to which they belong\n def atomList2uniqueFragList(self, atomList):\n fragList = []\n for a in atomList:\n f = self.atom2frag_index(a)\n if f not in fragList:\n fragList.append(f)\n return fragList\n\n @property\n def geom(self):\n \"\"\"cartesian geometry [a0]\"\"\"\n geom = np.zeros((self.Natom, 3), float)\n for iF, F in enumerate(self._fragments):\n row = self.frag_1st_atom(iF)\n geom[row:(row + F.Natom), :] = F.geom\n return geom\n\n @geom.setter\n def geom(self, newgeom):\n \"\"\" setter for geometry\"\"\"\n for iF, F in enumerate(self._fragments):\n row = self.frag_1st_atom(iF)\n F.geom[:] = newgeom[row:(row + F.Natom), :]\n\n @property\n def masses(self):\n m = np.zeros(self.Natom, float)\n for iF, F in enumerate(self._fragments):\n start = self.frag_1st_atom(iF)\n m[start:(start + F.Natom)] = F.masses\n return m\n\n @property\n def Z(self):\n z = [0 for i in range(self.Natom)]\n for iF, F in enumerate(self._fragments):\n first = self.frag_1st_atom(iF)\n z[first:(first + F.Natom)] = F.Z\n return z\n\n @property\n def intcos(self):\n _intcos = []\n for F in self._fragments:\n _intcos += F.intcos\n return _intcos\n\n def frag_1st_intco(self, iF):\n if iF >= len(self._fragments):\n return ValueError()\n start = 0\n for i in range(0, iF):\n start += len(self._fragments[i]._intcos)\n return start\n\n def printIntcos(self):\n for iF, F in enumerate(self._fragments):\n self.logger.info(\"Fragment %d\\n\" % (iF + 1))\n F.printIntcos()\n return\n\n def addIntcosFromConnectivity(self, C=None):\n for F in self._fragments:\n if C is None:\n C = F.connectivityFromDistances()\n F.addIntcosFromConnectivity(C)\n\n def addCartesianIntcos(self):\n for F in self._fragments:\n addCartesianIntcos(F._intcos, F._geom)\n\n def printGeom(self):\n \"\"\"Returns a string of the geometry for logging in [a0]\"\"\"\n for iF, F in enumerate(self._fragments):\n self.logger.info(\"\\tFragment %d\\n\" % (iF + 1))\n F.printGeom()\n\n def showGeom(self):\n \"\"\"Return a string of the geometry in [A]\"\"\"\n molsys_geometry = ''\n for iF, F in enumerate(self._fragments):\n molsys_geometry += (\"\\tFragment %d\\n\" % (iF + 1))\n molsys_geometry += F.showGeom()\n return molsys_geometry\n\n @property\n def atom_symbols(self):\n symbol_list = []\n for F in self._fragments:\n frag_symbol_list = F.get_atom_symbol_list()\n for j in frag_symbol_list:\n symbol_list.append(j)\n return symbol_list\n\n def consolidateFragments(self):\n if self.Nfragments == 1:\n return\n self.logger.info(\"\\tConsolidating multiple fragments into one for optimization.\")\n consolidatedFrag = frag.Frag(self.Z, self.geom, self.masses)\n del self._fragments[:]\n self._fragments.append(consolidatedFrag)\n\n def splitFragmentsByConnectivity(self):\n \"\"\" Split 
any fragment not connected by bond connectivity.\"\"\"\n tempZ = np.copy(self.Z)\n tempGeom = np.copy(self.geom)\n tempMasses = np.copy(self.masses)\n\n newFragments = []\n for F in self._fragments:\n C = connectivityFromDistances(F.geom, F.Z)\n atomsToAllocate = list(reversed(range(F.Natom)))\n while atomsToAllocate:\n frag_atoms = [atomsToAllocate.pop()]\n\n more_found = True\n while more_found:\n more_found = False\n addAtoms = []\n for A in frag_atoms:\n for B in atomsToAllocate:\n if C[A, B]:\n if B not in addAtoms:\n addAtoms.append(B)\n more_found = True\n for a in addAtoms:\n frag_atoms.append(a)\n atomsToAllocate.remove(a)\n\n frag_atoms.sort()\n subNatom = len(frag_atoms)\n subZ = np.zeros(subNatom, float)\n subGeom = np.zeros((subNatom, 3), float)\n subMasses = np.zeros(subNatom, float)\n for i, I in enumerate(frag_atoms):\n subZ[i] = tempZ[I]\n subGeom[i, 0:3] = tempGeom[I, 0:3]\n subMasses[i] = tempMasses[I]\n newFragments.append(frag.Frag(subZ, subGeom, subMasses))\n\n del self._fragments[:]\n self._fragments = newFragments\n\n # Supplements a connectivity matrix to connect all fragments. Assumes the\n # definition of the fragments has ALREADY been determined before function called.\n def augmentConnectivityToSingleFragment(self, C):\n self.logger.info('\\tAugmenting connectivity matrix to join fragments.')\n fragAtoms = []\n geom = self.geom\n for iF, F in enumerate(self._fragments):\n fragAtoms.append(\n range(self.frag_1st_atom(iF),\n self.frag_1st_atom(iF) + F.Natom))\n\n # Which fragments are connected?\n nF = self.Nfragments\n self.logger.critical(str(self.Nfragments))\n if self.Nfragments == 1:\n return\n\n frag_connectivity = np.zeros((nF, nF))\n for iF in range(nF):\n frag_connectivity[iF, iF] = 1\n\n Z = self.Z\n\n scale_dist = 1.3\n all_connected = False\n while not all_connected:\n for f2 in range(nF):\n for f1 in range(f2):\n if frag_connectivity[f1][f2]:\n continue # already connected\n minVal = 1.0e12\n\n # Find closest 2 atoms between fragments.\n for f1_atom in fragAtoms[f1]:\n for f2_atom in fragAtoms[f2]:\n tval = v3d.dist(geom[f1_atom], geom[f2_atom])\n if tval < minVal:\n minVal = tval\n i = f1_atom\n j = f2_atom\n\n Rij = v3d.dist(geom[i], geom[j])\n R_i = qcel.covalentradii.get(Z[i], missing=4.0)\n R_j = qcel.covalentradii.get(Z[j], missing=4.0)\n if Rij > scale_dist * (R_i + R_j):\n # ignore this as too far - for starters. 
may have A-B-C situation.\n continue\n\n self.logger.info(\"\\tConnecting fragments with atoms %d and %d\"\n % (i + 1, j + 1))\n C[i][j] = C[j][i] = True\n frag_connectivity[f1][f2] = frag_connectivity[f2][f1] = True\n\n # Now check for possibly symmetry-related atoms which are just as close\n # We need them all to avoid symmetry breaking.\n for f1_atom in fragAtoms[f1]:\n for f2_atom in fragAtoms[f2]:\n if f1_atom == i and f2_atom == j: # already have this one\n continue\n tval = v3d.dist(geom[f1_atom], geom[f2_atom])\n if np.fabs(tval - minVal) < 1.0e-10:\n i = f1_atom\n j = f2_atom\n self.logger.info(\"\\tAlso, with atoms %d and %d\\n\"\n % (i + 1, j + 1))\n C[i][j] = C[j][i] = True\n\n # Test whether all frags are connected using current distance threshold\n if np.sum(frag_connectivity[0]) == nF:\n self.logger.info(\"\\tAll fragments are connected in connectivity matrix.\")\n all_connected = True\n else:\n scale_dist += 0.2\n self.logger.info(\n \"\\tIncreasing scaling to %6.3f to connect fragments.\" % scale_dist)\n return\n\n def clear(self):\n self._fragments.clear()\n self._fb_fragments.clear()\n\n","sub_path":"optking/molsys.py","file_name":"molsys.py","file_ext":"py","file_size_in_byte":13424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"445678461","text":"import pickle\nimport os\n\ndef reset_memory():\n mem = load('assignments')\n mem = []\n save('assignments', mem)\n\ndef load(name):\n '''\n This method creates and loads a new journal.\n\n : param name: This base name of journal to load.\n : return: A new journal data structure populated with the file data.\n '''\n filename = get_full_pathname(name)\n data = None\n if os.path.exists(filename):\n with open(filename, 'rb') as fin:\n data = pickle.load(fin)\n return data\n\n\ndef save(name, data):\n filename = get_full_pathname(name)\n print(\"... 
saving to: {}\".format(filename))\n with open(filename, 'wb') as file_out:\n pickle.dump(data, file_out, pickle.HIGHEST_PROTOCOL)\n\n\ndef get_full_pathname(name):\n filename = os.path.abspath(os.path.join('.',name + '.pkl'))\n return filename\n\nif __name__ == \"__main__\":\n reset_memory()\n","sub_path":"file_io.py","file_name":"file_io.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"362473722","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nif __name__ == '__main__':\n some_dict = {\n 1: \"abc\",\n 2: \"home\",\n 3: \"test\",\n 4: \"task\"\n }\n print(f\"Словарь до изменений:\\n{some_dict}\")\n dict_items = some_dict.items()\n changed_dict = {i: j for j, i in dict_items}\n print(f\"Словарь после изменений:\\n{changed_dict}\")\n","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"108341967","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('asset', '0008_auto_20150520_1523'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='asbrand',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('brandd', models.CharField(max_length=30, verbose_name=b'\\xe8\\xb5\\x84\\xe4\\xba\\xa7\\xe5\\x93\\x81\\xe7\\x89\\x8c')),\n ('remark', models.CharField(max_length=200, verbose_name=b'\\xe5\\xa4\\x87\\xe6\\xb3\\xa8')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"asset/migrations/0009_asbrand.py","file_name":"0009_asbrand.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"532938897","text":"#!/usr/bin/python3\n\ntry:\n import sys,os\n\n# from matplotlib import style\n# import matplotlib.pyplot as plt\n# import matplotlib.animation as animation\n# import matplotlib.dates as dates\n import pyqtgraph as pg\n from pyqtgraph.Qt import QtCore, QtGui\n import numpy as np\n\n\n import threading\n\n import datetime\n import socket\n import select\n import time\n\n import inspect\n import re\n from time import strftime\nexcept ImportError as e:\n print('failed to import: {0}'.format(e))\n sys.exit()\n\n\nclass StoppableThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.stop_flag = threading.Event()\n\n def stop(self):\n if self.isAlive() == True:\n self.stop_flag.set()\n\n def stopped(self):\n return self.stop_flag.is_set()\n\n\n\nclass Log(object):\n def __init__(self, logfile=False, level='debug', display=True, maxlength=20):\n self.logfile = logfile\n self.display = display\n self.level = level\n self.maxlength = maxlength\n\n self.colors = { 'red' : '\\033[31m',\n 'white' : '\\033[37m',\n 'gray' : '\\033[0m',\n 'orange' : '\\033[33m',\n 'blue' : '\\033[34m',\n 'green' : '\\033[32m',\n 'reset' : '\\033[0m' }\n\n self.colors_levels = { 'info' : 'white',\n 'error' : 'red',\n 'debug' : 'gray',\n 'warning' : 'orange' }\n\n self.custom_highlights = {}\n\n\n def choose_show(self, level):\n \"\"\" Decide if a message should be shown based on configured message level \"\"\"\n if self.level == 'error' and (level == 'debug' or level == 'warning' or level == 'info'):\n return False\n if self.level == 'warning' and 
(level =='debug' or level == 'info'):\n return False\n if self.level == 'info' and (level == 'debug'):\n return False\n return True\n\n\n def create_message(self, level, module, message):\n # TODO: Add feature to detect lists/dicts and print them out nicely\n if self.choose_show(level):\n message = self.detect_type(message)\n module_justified = module.ljust(self.maxlength)\n level_justified = level.ljust(7)\n time = strftime(\"%H:%M:%S\")\n\n if self.display:\n print(\"{0} {1} {2} {3}\".format(module_justified,\n self.colors[self.colors_levels[level]],\n self.custom_highlight(message, self.colors[self.colors_levels[level]]),\n self.colors['reset']))\n\n if self.logfile:\n self.write_to_file(\"{0} {1}{2}{3}\\n\".format(strftime(\"%Y-%m-%d %H:%M:%S\"),\n level_justified,\n module_justified,\n message))\n\n\n def detect_type(self, message):\n \"\"\" Detect whether message is list or dict \"\"\"\n if type(message) == list:\n message = ' , '.join(message)\n elif type(message) == dict:\n message_out = ''\n for k,v in message.items():\n message_out = \"{0}\\n{1} : {2}\".format(message_out,k,v)\n message = message_out\n return message\n\n\n def create_file(self):\n \"\"\" Create a file if it doesn't exist \"\"\"\n try:\n with open(self.logfile) as f: pass\n except IOError as e:\n try:\n FILE = open(self.logfile, 'w')\n FILE.close()\n except IOError as e:\n print('WARNING ... Couldn\\'t create file \\'%s\\' Not writing logs!'%self.logfile)\n return False\n return True\n\n\n def write_to_file(self, message):\n if self.create_file():\n try:\n FILE = open(self.logfile, 'a')\n FILE.write(message)\n FILE.close()\n except:\n print('Failed to write to logfile')\n\n\n def custom_highlight(self, message, reset_color):\n if message:\n for string, color in self.custom_highlights.items():\n message = re.sub( string, self.colors[color] + string + reset_color, message)\n return message\n\n\n def color(self, string, color):\n \"\"\" Callable method to add a custom highlight eg. 
( log.color('what_to_highlight', 'color_to_use') ) \"\"\"\n        self.custom_highlights[string] = color\n\n\n    def info(self, message):\n        self.create_message('info', inspect.stack()[1][3], message)\n\n\n    def debug(self, message):\n        self.create_message('debug', inspect.stack()[1][3], message)\n\n\n    def warning(self, message):\n        self.create_message('warning', inspect.stack()[1][3], message)\n\n\n    def error(self, message):\n        self.create_message('error', inspect.stack()[1][3], message)\n\n\n    def red(self, message):\n        self.create_message('info', inspect.stack()[1][3], message)\n\n\n    def blue(self, message):\n        self.create_message('info', inspect.stack()[1][3], message)\n\n\n    def green(self, message):\n        self.create_message('info', inspect.stack()[1][3], message)\n\n\n    def orange(self, message):\n        self.create_message('info', inspect.stack()[1][3], message)\n\n\n\nclass Config_Option(object):\n    \"\"\" Helper class of Config() \"\"\"\n    def __init__(self, section=False, comment=[], key=False, value=False):\n        self.section = section\n        self.key = key\n        self.value = value\n        if comment:\n            if type(comment) == list:\n                self.comment = comment\n            else:\n                self.comment = [comment]\n        else:\n            self.comment = []\n\n    def set_comment(self, comment): \n        if type(comment) == list:\n            self.comment = comment\n        else:\n            self.comment = [comment]\n\n    def set_section(self, section): self.section = section\n    def set_key(self, key): self.key = key\n    def set_value(self, value): self.value = value\n    def get_section(self): return self.section\n    def get_comment(self): return self.comment\n    def get_key(self): return self.key\n    def get_value(self): return self.value\n\n\n\nclass Config(object):\n    def __init__(self, quiet=False):\n        self.config_file_path = False\n        # This list stores all config option objects\n        self.config = []\n        # display errors\n        self.quiet = quiet\n\n\n    def set_option(self, option): self.config.append(option)\n    def set_config_path(self, path): self.config_file_path = path\n    def get_options(self): return sorted(self.config, key=lambda x: x.get_section(), reverse=False)\n    def get_config_path(self): return self.config_file_path\n\n\n    def test_float(self, var):\n        try:\n            return float(var)\n        except:\n            return False\n\n\n    def test_int(self, var):\n        try:\n            return int(var)\n        except:\n            return False\n\n\n    def convert_numbers(self, var):\n        \"\"\" Convert strings or lists of numbers to floats or ints \"\"\"\n        # var is a list\n        if type(var) == list:\n\n            for x in range(0, len(var)):\n                if self.test_int(var[x]):\n                    var[x] = self.test_int(var[x])\n                elif self.test_float(var[x]):\n                    var[x] = self.test_float(var[x])\n\n        # Var is not a list\n        else:\n            if self.test_int(var):\n                var = int(var)\n            elif self.test_float(var):\n                var = float(var)\n\n        return var\n\n\n    def parse_file(self, path):\n        \"\"\" Parse file and create a list with option objects \"\"\"\n\n        # Get config file contents in a list\n        section = False\n        comments = []\n\n        config_file = self.get_file()\n\n        if not config_file:\n            return False\n\n        for line in config_file:\n            # clean line from whitespaces, newlines etc\n            line = self.sanitize(line)\n\n            # Line is empty, do nothing\n            if not line:\n                pass\n\n            # Line is commented\n            elif line[0] == '#':\n                comments.append(self.sanitize(line[1:]))\n\n            # Line is a section header\n            elif line[0] == '[' and line[-1] == ']':\n                section = self.sanitize(line, extra_opts = ['[', ']'])\n\n            # We are in a section loop\n            elif section:\n\n                # Line is a key/value pair\n                if '=' in line:\n                    k,v = line.split('=', 1)\n                    k = self.sanitize(k)\n                    v = self.sanitize(v)\n\n                    # replace 
certain values like ~ -> /home/\n v = self.replace(v)\n # TODO Find a solution for this, the replaced variable should not be written back to the file\n # Also does this not work for variables set by config.set()\n\n # Value is empty, add empty value\n if not v:\n option = self.set(section, k, '', comment=comments)\n comments = []\n\n\n # Value is a list\n elif v[0] == '[' and v[-1] == ']':\n v = self.sanitize(v, extra_opts = ['[', ']'])\n\n # Value contains a comma. read all values in a list\n if ',' in v:\n v_list = self.sanitize_list(v.split(','))\n option = self.set(section, k, v_list, comment=comments)\n comments = []\n\n\n # Value doesn't contain comma so could be a list with a single item or an empty list\n else:\n if v:\n option = self.set(section, k, v, comment=comments)\n else:\n option = self.set(section, k, [], comment=comments)\n comments = []\n\n # Value is a simple key, value pair\n else:\n option = self.set(section, k, v, comment=comments)\n comments = []\n\n return self.config\n\n\n def set(self, section, k, v, comment=[]):\n \"\"\" Create a config_option() instance and fill it with data \"\"\"\n v = self.convert_numbers(v)\n # If option already exist, change it\n for option in self.get_options():\n if option.get_section() == section:\n if option.get_key() == k:\n option.set_comment(comment)\n option.set_value(v)\n return option\n # If option does not exist, create it\n option = Config_Option(key=k, value=v, section=section, comment=comment)\n self.set_option(option)\n return option\n\n\n def get(self, section, key):\n \"\"\" Get a value from list of config_option() instances in self.config by section and key \"\"\"\n for option in self.get_options():\n if option.get_section() == section:\n if option.get_key() == key:\n return option.get_value()\n if not self.quiet:\n print('Couldn\\'t find value for key in section {0} : {1}'.format(section, key))\n return False\n\n\n def test_file(self):\n \"\"\" Test if file exists \"\"\"\n try:\n with open(self.config_file_path) as f: pass\n return True\n except IOError as e:\n return False\n\n\n def ensure_dir(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n\n def write_to_file(self, data=False, remove=False):\n \"\"\" Write a string to a file, remove file if it exists by giving remove=False \"\"\"\n if not self.get_config_path():\n return False\n\n self.ensure_dir(os.path.dirname(self.get_config_path()))\n\n if remove == True:\n try:\n FILE = open(self.config_file_path, 'w')\n FILE.close()\n return True\n except:\n if not self.quiet:\n print('Failed to remove file')\n return False\n\n else:\n try:\n FILE = open(self.config_file_path, 'a')\n FILE.write(data + '\\n')\n FILE.close()\n return True\n except:\n if not self.quiet:\n print('Failed to write to file')\n pass\n return False\n\n\n def write(self):\n \"\"\" Write the config to disk \"\"\"\n\n self.write_to_file(remove=True)\n section = False\n first = True\n\n for option in self.get_options():\n current_section = option.get_section()\n\n if not current_section == section:\n # Only put newline above section header if it is not the first one\n if first:\n self.write_to_file('[{0}]'.format(current_section))\n first = False\n else:\n self.write_to_file('\\n[{0}]'.format(current_section))\n section = current_section\n\n comment = option.get_comment()\n for c in comment:\n self.write_to_file('# {0}'.format(c))\n\n value = option.get_value()\n if type(value) == list:\n value = '[{0}]'.format(','.join(value))\n\n self.write_to_file('{0} = 
{1}'.format(option.get_key(),value))\n if not self.quiet:\n print('File written to: {0}'.format(self.config_file_path))\n\n\n def get_file(self):\n \"\"\" Get contents of a file and put every line in a list\"\"\"\n contents = []\n try:\n f = open(self.config_file_path, 'r')\n except IOError as e:\n if not self.quiet:\n print('No config file found at: {0}'.format(self.config_file_path))\n return False\n\n for line in f:\n if line:\n contents.append(self.sanitize(line))\n f.close()\n\n if contents:\n return contents\n return False\n\n\n def sanitize(self, data, extra_opts = []):\n \"\"\" Clean variable from newlines, leading/trailing spaces and other stuff \"\"\"\n sanitize_list = [' ', '\\'', '\\\"', '\\n'] + extra_opts\n for sanitize in sanitize_list:\n data = data.strip(sanitize)\n return data\n\n\n def sanitize_list(self, data):\n \"\"\" Clean list indices from newlines, leading/trailing spaces and other stuff \"\"\"\n output = []\n for x in data:\n x = x.strip()\n x = x.strip('\\'')\n x = x.strip('\\\"')\n x = x.strip('\\n')\n x = x.strip('[')\n x = x.strip(']')\n output.append(x)\n data = output[:]\n return data\n\n\n def replace(self, data):\n \"\"\" Replace characters or strings in a string with something else \"\"\"\n replace_list = {'~' : os.getenv(\"HOME\"), '' : socket.gethostname()}\n for k,v in replace_list.items():\n data = data.replace(k, v)\n return data\n\n\n def parse(self):\n # Parse the config file\n if self.parse_file(self.config_file_path):\n return True\n return False\n\n\n\nclass ADS1299(object):\n def __init__(self):\n self.registers = {}\n self.registers['ID'] = 0x00\n self.registers['CONFIG1'] = 0x01\n self.registers['CONFIG2'] = 0x02\n self.registers['CONFIG3'] = 0x03\n self.registers['LOFF'] = 0x04\n self.registers['CH1SET'] = 0x05\n self.registers['CH2SET'] = 0x06\n self.registers['CH3SET'] = 0x07\n self.registers['CH4SET'] = 0x08\n self.registers['CH5SET'] = 0x09\n self.registers['CH6SET'] = 0x0A\n self.registers['CH7SET'] = 0x0B\n self.registers['CH8SET'] = 0x0C\n self.registers['BIAS_SENSP'] = 0x0D\n self.registers['BIAS_SENSN'] = 0x0E\n self.registers['LOFF_SENSP'] = 0x0F\n self.registers['LOFF_SENSN'] = 0x10\n self.registers['LOFF_FLIP'] = 0x11\n self.registers['LOFF_STATP'] = 0x12\n self.registers['LOFF_STATN'] = 0x13\n self.registers['GPIO'] = 0x14\n self.registers['MISC1'] = 0x15\n self.registers['MISC2'] = 0x16\n self.registers['CONFIG4'] = 0x17\n\n self.values = {}\n self.values['ID'] = list('00011110')\n self.values['CONFIG1'] = list('10010110')\n self.values['CONFIG2'] = list('11000000')\n self.values['CONFIG3'] = list('01100000')\n self.values['LOFF'] = list('00000000')\n self.values['CH1SET'] = list('01100000')\n self.values['CH2SET'] = list('01100000')\n self.values['CH3SET'] = list('01100000')\n self.values['CH4SET'] = list('01100000')\n self.values['CH5SET'] = list('01100000')\n self.values['CH6SET'] = list('01100000')\n self.values['CH7SET'] = list('01100000')\n self.values['CH8SET'] = list('01100000')\n self.values['BIAS_SENSP'] = list('00000000')\n self.values['BIAS_SENSN'] = list('00000000')\n self.values['LOFF_SENSP'] = list('00000000')\n self.values['LOFF_SENSN'] = list('00000000')\n self.values['LOFF_FLIP'] = list('00000000')\n self.values['LOFF_STATP'] = list('00000000')\n self.values['LOFF_STATN'] = list('00000000')\n self.values['GPIO'] = list('00001111')\n self.values['MISC1'] = list('00000000')\n self.values['MISC2'] = list('00000000')\n self.values['CONFIG4'] = list('00000000')\n\n\n def get_reg(self, reg):\n return 
'0x{:02x}'.format(self.registers[reg]), hex(int(''.join(self.values[reg]),2))\n\n\n def get_all_regs(self):\n return_list = []\n for reg in self.registers:\n return_list.append(self.get_reg(reg))\n return return_list\n\n\n def set_reg(self, reg, bit, value):\n self.values[reg][bit-1] = str(value)\n return self.get_reg(reg)\n\n\n def set_channel(self, channel, state=True):\n # Enable/disable channel\n if state:\n log.info('Enabling channel: {0}'.format(channel))\n return self.set_reg('CH{0}SET'.format(str(channel)), 1, 0)\n else:\n log.info('Disabling channel: {0}'.format(channel))\n return self.set_reg('CH{0}SET'.format(str(channel)), 1, 1)\n\n\n def set_srb1(self, state=True):\n # Enable/disable channel\n if state:\n log.info('Setting SRB1 as ground on all channels')\n return self.set_reg('MISC1', 3, 1)\n else:\n log.info('Disconnecting SRB1')\n return self.set_reg('MISC1', 3, 0)\n\n\n def set_internal_ref(self, state=True):\n # Enable/disable channel\n if state:\n log.info('Using internal reference')\n return self.set_reg('CONFIG3', 1, 1)\n else:\n log.info('Using external reference')\n return self.set_reg('CONFIG3', 1, 0)\n\n\n def set_gain(self, channel, level):\n gain = {}\n gain[1] = list('000')\n gain[2] = list('001')\n gain[4] = list('010')\n gain[6] = list('011')\n gain[8] = list('100')\n gain[12] = list('101')\n gain[24] = list('110')\n\n self.set_reg('CH{0}SET'.format(str(channel)), 2, gain[level][0])\n self.set_reg('CH{0}SET'.format(str(channel)), 3, gain[level][1])\n self.set_reg('CH{0}SET'.format(str(channel)), 4, gain[level][2])\n\n\n\nclass Data(object):\n def __init__(self, config):\n # TODO: create a method to create a filename with date_increasing number\n self.channel = False\n self.data = False\n self.loff_p = True\n self.loff_n = True\n self.config = config\n #self.timestamp = datetime.datetime.now().strftime(\"%H:%M:%S.%f\")\n self.timestamp = datetime.datetime.today()\n\n\n def set_channel(self, channel): self.channel = channel\n def set_data(self, data): self.data = data\n def set_loff_n(self, state): self.loff_n = state\n def set_loff_p(self, state): self.loff_p = state\n def get_channel(self): return self.channel\n def get_data(self): return self.data\n def get_timestamp(self): return self.timestamp\n def get_loff_p(self): return self.loff_p\n def get_loff_n(self): return self.loff_n\n\n\n def create_file(self):\n \"\"\" Create a file if it doesn't exist \"\"\"\n try:\n with open(self.config.get('log', 'path')) as f: pass\n except IOError as e:\n try:\n FILE = open(self.config.get('log', 'path'), 'w')\n FILE.close()\n except IOError as e:\n log.error('WARNING ... 
Couldn\\'t create file \\'%s\\''%self.write_path)\n return False\n return True\n\n\n def write(self):\n \"\"\" Write data to file \"\"\"\n if self.create_file():\n try:\n FILE = open(self.config.get('log', 'path'), 'a')\n #FILE.write(\"{0}|{1}|{2}\\n\".format(self.get_timestamp(), \\\n # self.get_channel(), \\\n # self.get_data()))\n FILE.write(\"{0}|{1}\\n\".format( self.get_channel(), \\\n self.get_data()))\n FILE.close()\n except:\n log.error('Failed to write to file')\n\n\n def display(self):\n log.info(\"{0} {1} {2}\".format(self.get_timestamp(), self.get_channel(), self.get_data()))\n\n\n\nclass DataList(object):\n def __init__(self):\n self.data = []\n\n\n def add_data(self, data): self.data.append(data)\n\n\n def get_last_items(self, channel, last_item=False, amount=False):\n # Get the newest items since the last_item object\n \n # Make a copy to work with, there could occur changes while running this method\n data_list = self.get_data_list(channel)[:]\n\n # When first run, return all the data received so far\n if not last_item:\n return data_list\n\n return_list = []\n\n # Reverse cycle through list_date, searching for last_item and adding all the\n # found objects to return_list on the way\n for data in reversed(data_list):\n if data == last_item:\n return list(reversed(return_list))\n\n return_list.append(data)\n\n log.error('Could not find last_item: {0}'.format(last_item))\n return False\n\n\n def get_data_list(self, channel):\n return_list = []\n data_tmp = self.data[:]\n for data in data_tmp:\n if int(data.get_channel()) == int(channel):\n return_list.append(data)\n\n return return_list\n\n\n\nclass Socket(object):\n def connect(self, host, port):\n try:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n log.info('Client socket created')\n except socket.error as e:\n log.error('Failed to create client socket: {0}'.format(e))\n return False\n #self.socket = socket.socket()\n self.socket.settimeout(5)\n try:\n self.socket.connect((host, port))\n log.info('Connected to server')\n return True\n except socket.error as e:\n log.error('Failed to connect to server: {0}'.format(e))\n return False\n\n\n def send(self, data, prefix=True):\n # TODO check if connection is still alive\n if prefix:\n data = str(len(data)).rjust(3, '0') + data\n\n data_bytes = data.encode()\n\n try:\n self.socket.sendall(data_bytes)\n log.info('<<< {0}'.format(data))\n return True\n except socket.error as e:\n #log.error('Failed to send data: {0}'.format(e))\n return False\n\n\n def is_alive(self):\n pass\n\n\n def receive(self, bits):\n # TODO check if connection is still alive\n try:\n data = self.socket.recv(int(bits)).decode('utf-8')\n return self.sanitize(data)\n except socket.error as e:\n #log.error('Failed to receive data: {0}'.format(e))\n return False\n\n\n def sanitize(self, data):\n return data.strip('\\n')\n\n\n def close(self):\n self.socket.close()\n \n\n\nclass PlotThread(StoppableThread):\n def __init__(self):\n StoppableThread.__init__(self)\n\n\n def setup(self):\n self.fig = plt.figure()\n self.ax1 = self.fig.add_subplot(1,1,1)\n\n\n def animate(self):\n xar = []\n yar = []\n\n for data in datalist.get_data_list():\n xar.append(data.get_data())\n yar.append(data.get_timestamp())\n self.ax1.clear()\n self.ax1.plot(xar,yar)\n\n\n def plot(self):\n self.setup()\n ani = animation.FuncAnimation(self.fig, self.animate, interval=1000)\n plt.show()\n print('lkjj')\n\n\n def run(self):\n print('lkjj')\n 
self.plot()\n while True:\n pass\n\n\n\nclass EEG(object):\n def __init__(self, config, datalist):\n self.config = config\n self.ads1299 = ADS1299()\n self.datalist = datalist\n\n\n def get_file(self, filename):\n \"\"\" Get contents of a file and put every line in a list\"\"\"\n contents = []\n try:\n f = open(filename, 'r')\n except IOError as e:\n log.error('No config file found at: {0}'.format(filename))\n return False\n\n for line in f:\n if line:\n contents.append(line)\n f.close()\n\n if contents:\n return contents\n return False\n\n\n def start_threads(self):\n self.running_threads = []\n self.running_threads.append(QtThread(self.config).start())\n\n\n def stop_threads(self):\n for thread in self.running_threads:\n thread.stop()\n\n\n def test(self, var):\n try:\n int(var)\n return True\n except:\n return False\n\n\n def get_data(self):\n # Receive length of data first\n skipped = 0\n while True:\n length = self.socket.receive(4)\n if length:\n if length[0] == '#':\n length = length[1:]\n skipped = 0\n break\n else:\n skipped += 1\n\n if skipped >= 3:\n log.error('lots of skipping frames... returning False')\n return False\n\n if self.test(length):\n # then get the full frame \n data = self.socket.receive(length)\n if data:\n if self.add_data(data):\n return True\n else:\n log.error('Error getting data, no data')\n return True\n else:\n log.error('Error getting data, length is corrupted: {0}'.format(length))\n return True\n return True\n\n\n def get_timestamp(self):\n return int(strftime(\"%d%H%M%S\"))\n\n\n def add_data(self, rd):\n # Create data objects and add to datalist object\n rd = rd.split(',')\n if len(rd) != 11:\n log.error('Data is corrupted!: {0}'.format(','.join(rd)))\n return False\n\n channel = 0\n # Skip the status bits start at byte 4\n for d in rd[3:11]:\n channel += 1\n if self.config.get('channel'+ str(channel), 'state') == 'on':\n if d == 0:\n log.error('Data is 0')\n return False\n data = Data(self.config)\n data.set_channel(channel)\n data.set_data(self.map_data(d, -8388607, 8388607, -0.3, 0.3))\n #data.display()\n self.datalist.add_data(data)\n return True\n\n\n def map_data(self, x, in_min, in_max, out_min, out_max):\n x = float(x)\n in_min = float(in_min)\n in_max = float(in_max)\n out_min = float(out_min)\n out_max = float(out_max)\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\n\n\n def send_settings(self):\n for reg in self.ads1299.get_all_regs():\n self.socket.send('WREG,{0},{1}'.format(reg[0], reg[1]))\n\n\n def set_srb1(self, state=True):\n if state:\n self.config.set('srb1', 'state', 'ground')\n self.ads1299.set_srb1()\n return\n else:\n self.config.set('srb1', 'state', 'disconnected')\n self.ads1299.set_srb1(state=False)\n return\n\n\n def set_ref(self, state=True):\n if state:\n self.config.set('general', 'reference', 'internal')\n self.ads1299.set_internal_ref()\n return\n else:\n self.config.set('general', 'internal', 'external')\n self.ads1299.set_internal_ref(state=False)\n return\n\n\n def set_channel(self, channel, state=True):\n if state:\n log.info('Channel{0} is on'.format(channel))\n self.config.set('channel' + str(channel), 'state', 'on')\n self.ads1299.set_channel(channel)\n else:\n log.info('Channel{0} is off'.format(channel))\n self.config.set('channel' + str(channel), 'state', 'off')\n self.ads1299.set_channel(channel, state=False)\n\n\n def set_gain(self, channel, level):\n log.info('Gain on Channel{0} is set to: {1}'.format(channel, level))\n self.config.set('channel' + str(channel), 'gain', level)\n 
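# keep the ADS1299 register image in sync so send_settings() transmits the new gain\n        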
self.ads1299.set_gain(channel, level)\n\n\n    def send_start(self):\n        self.socket.send('START')\n\n\n    def send_stop(self):\n        self.socket.send('STOP')\n\n\n    def usage(self):\n        print(\"PYEEG\")\n        print(\"OPTIONS:\")\n        print(\"    --noise-check\")\n        print(\"    --test-signal\")\n        print(\"    --plot\")\n        print(\"    --start\")\n        print(\"    --run-time=\")\n        print(\"    --channels=1,2,3,4,5,6,7,8\")\n        print(\"    --gain= or \")\n        print(\"      level = 1,2,4,6,8,12,24\")\n\n        print(\"    --shutdown\")\n\n\n    def set_config_defaults(self):\n        self.config.set('server', 'address', 'alarmpi')\n        self.config.set('server', 'port', '8888')\n        self.config.set('general', 'length-size', '3')\n        self.config.set('general', 'run-time', '-1')\n        self.config.set('general', 'reference', 'internal')\n        self.config.set('general', 'refresh', '0.1')\n        self.config.set('channel1', 'state', 'on')\n        self.config.set('channel2', 'state', 'on')\n        self.config.set('channel3', 'state', 'on')\n        self.config.set('channel4', 'state', 'on')\n        self.config.set('channel5', 'state', 'on')\n        self.config.set('channel6', 'state', 'on')\n        self.config.set('channel7', 'state', 'on')\n        self.config.set('channel8', 'state', 'on')\n        self.config.set('channel1', 'gain', '24')\n        self.config.set('channel2', 'gain', '24')\n        self.config.set('channel3', 'gain', '24')\n        self.config.set('channel4', 'gain', '24')\n        self.config.set('channel5', 'gain', '24')\n        self.config.set('channel6', 'gain', '24')\n        self.config.set('channel7', 'gain', '24')\n        self.config.set('channel8', 'gain', '24')\n        self.config.set('srb1', 'state','ground')\n        self.config.set('log', 'path', '/home/eco/eeg.txt')\n\n\n    def handle_arg(self):\n        if len(sys.argv) < 2 or \"--help\" in sys.argv:\n            return self.usage()\n\n        # Setting config values\n        for arg in sys.argv:\n\n            if len(arg.split('=')) == 2:\n                key = arg.split('=')[0][2:]\n                value = arg.split('=')[1]\n\n                if '--run-time=' in arg:\n                    self.config.set('general', key, value)\n\n                elif '--address=' in arg:\n                    self.config.set('server', key, value)\n\n                elif '--port=' in arg:\n                    self.config.set('server', key, value)\n\n                elif '--channels=' in arg:\n                    self.set_channel(value)\n\n                elif '--gain=' in arg:\n                    self.set_gain(value)\n\n        # Commands\n        for arg in sys.argv:\n            if \"--plot\" in arg:\n                self.start_threads()\n\n            if \"--start\" in arg:\n                # NOTE is not receiving on the c side!!\n                self.send_channel_config()\n                self.socket.send('START')\n                self.get_data()\n\n            elif \"--noise-check\" in arg:\n                self.socket.send('NOISECHECK')\n                self.get_data()\n\n            elif \"--test-signal\" in arg:\n                self.socket.send('TESTSIGNAL')\n                self.get_data()\n\n            elif \"--shutdown\" in arg:\n                self.socket.send('SHUTDOWN')\n\n        return False\n\n\n    def connect(self):\n        # Create socket\n        self.socket = Socket()\n        return self.socket.connect(self.config.get('server', 'address'), self.config.get('server', 'port'))\n\n\n    def disconnect(self):\n        log.info('Connection to server is closed')\n        self.socket.close()\n\n\nlog = Log()\n\n\nif __name__ == \"__main__\":\n    config = Config()\n    datalist = DataList()\n    ads1299 = ADS1299()\n    log = Log()\n    log.color('>>>', 'green')\n    log.color('<<<', 'blue')\n    log.color('###', 'red')\n    log.color('---', 'blue')\n\n    app = EEG(config, datalist)\n    app.set_config_defaults()\n    # connect first so handle_arg() can issue commands over the socket\n    if app.connect():\n        app.handle_arg()\n        app.disconnect()\n","sub_path":"bin/pyeeg.py","file_name":"pyeeg.py","file_ext":"py","file_size_in_byte":33969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"136528570","text":"import os\n\ncwd = os.getcwd()\n\nmovie_list = [\"BatvSup\", \"SuicideSquad\", \"Stoker\", \"TheRoom\", \"MemoriesofMurder\", \"Madeo\", 
\"AllaboutMyWife\", \"ColdEyes\", \"OurSunhi\"]\n\nfor index, movie in enumerate(movie_list):\n\tif index%5 == 3: \n\t\tprint(\"Making new folder for %s\" %movie)\n\t\tos.system(\"mkdir ../%s\" %(movie))\n\n\t\tos.chdir(\"../\")\n\t\tprint(\"Darknet run started for %s\" %(movie))\n\t\tos.system(\"./darknet detector demo cfg/combine9k.data cfg/yolo9000.cfg ../yolo9000-weights/yolo9000.weights -prefix ./%s/frame ./%s_2fps.mp4 -thresh 0.15\" %(movie, movie))\n\t\tprint(\"Darknet completed for %s\" %(movie))\n\t\n\t\tprint(\"Make new Collection folder for %s\" %(movie))\n\t\tos.system(\"mkdir %s\" %(movie))\n\t\tos.system(\"mv %s_2fps.mp4_Detected_objects.csv %s_Detected_objects.csv\" %(movie, movie))\n\t\tos.chdir(\"./Collection\")\n\t\tos.system(\"mv ../%s_Detected_objects.csv ./%s/\" %(movie, movie))","sub_path":"loop_darknet3.py","file_name":"loop_darknet3.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"311755606","text":"#!/usr/bin/env python3\n\"\"\" This is a script that correlates with the 1st chapter of the 'Head First Python' book \"\"\"\n\ndef print_lol(the_list, level=0):\n \"\"\"\n Simple function that takes a list, and then checks for lists within lists. If a list within a list\n exists we use resursion (a function calling itself) to print the inner list. Once we reach a point\n where there are no more nested lists, we will print that remaining data.\n\n Parameters:\n the_list - a python list of data\n level - number of tab levels to indent\n \"\"\"\n for each_item in the_list:\n # isinstance is a BIF for python (Built In Function)\n if isinstance(each_item, list):\n # Recursion\n print_lol(each_item, level)\n else:\n for tab_stop in range(level):\n print (\"\\t\", end='')\n print(each_item)\n\n# Nested lists\nmovies = [ \"The Holy Grail\", 1973, \"Terry Jones & Terry Gilliam\", 91,\n [ \"Graham Chapman\",\n [ \"Michael Palin\", \"John Cleese\", \"Terry Gilliam\", \"Eric Idle\", \"Terry Jones\"]\n ]\n ]\n\n# Call the print_lol function and pass it the movies list.\n#print_lol(movies) # chapter 1 version\n#print_lol(movies, 1) # chapter 2 version\n","sub_path":"nester_nb/nester.py","file_name":"nester.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"400520228","text":"class Solution:\n def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:\n res = []\n # 路径缓存-集合类型\n cache = {beginWord}\n # 用于保存路径,最后逆向遍历出原路径\n pathDict = {}\n pathDict[beginWord] = None\n # 路径集合,查找效率比数组高\n wordDict = set(wordList)\n # 队列 优化为直接保存路径\n queue = [[beginWord]]\n lenOfString = len(beginWord)\n flag = True\n while len(queue) > 0:\n queueSize = len(queue)\n nextLevel = []\n for i in range(queueSize):\n curPath = queue.pop(0)\n cur = curPath[-1]\n if cur == endWord:\n res.append(curPath)\n break\n if not flag:\n break\n for j in range(lenOfString):\n for x in range(26):\n newStr = cur[:j] + chr(97+x) + cur[j+1:]\n if newStr not in cache and newStr in wordDict:\n newPath = [] + curPath\n newPath.append(newStr)\n queue.append(newPath)\n nextLevel.append(newStr)\n cache.update(nextLevel)\n if not res: return []\n return res","sub_path":"LeetCode/126. 单词接龙 II(bfs).py","file_name":"126. 
单词接龙 II(bfs).py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"270209981","text":"\"\"\"\nThe number 197 is called a circular prime because all rotations\nof its digits (moving the last digit to the front) are themselves prime: 197, 719 and 971.\n\nThere are thirteen such primes below 100:\n2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79 and 97.\n\nHow many circular primes are there below one million?\n\"\"\"\n\n\nimport itertools as iter\n\n\ndef dividers(n):\n    i = 2\n    primfac = []\n    while i * i <= n:\n        while n % i == 0:\n            primfac.append(i)\n            n = n / i\n        i = i + 1\n    if n > 1:\n        primfac.append(n)\n    simples_uniq = list(set(primfac))\n    counts = [[j for j in range(primfac.count(i) + 1)] for i in simples_uniq]\n    delims = []\n    for i in iter.product(*counts):\n        delim = 1\n        for j in range(len(i)):\n            delim *= simples_uniq[j]**i[j]\n        delims.append(int(delim))\n    return sorted(delims)\n\n\nres_len = 0\n\nfor i in range(2, 1000000):\n    if len(dividers(i)) <= 2:\n        str_i = str(i)\n        for j in str_i:\n            if len(dividers(int(str_i))) > 2:\n                break\n            str_i = str_i[-1] + str_i[0: -1]\n        else:\n            res_len += 1\n\nprint(res_len)\n","sub_path":"35.py","file_name":"35.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"155272757","text":"# Copyright 2017 Bracket Computing, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# https://github.com/brkt/brkt-cli/blob/master/LICENSE\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport os\nfrom brkt_cli.encryptor_service import (\n encryptor_did_single_disk,\n wait_for_encryptor_up,\n wait_for_encryption,\n)\nfrom brkt_cli.util import Deadline\nfrom brkt_cli.esx.esx_service import (\n launch_mv_vm_from_s3,\n validate_local_mv_ovf\n)\n\n\nlog = logging.getLogger(__name__)\n\n\ndef update_ovf_image_mv_vm(vc_swc, enc_svc_cls, values, guest_vm, mv_vm,\n user_data_str, static_ip=None):\n new_root_disk_name = None\n try:\n # Reconfigure VM with more CPUs and memory\n vc_swc.reconfigure_vm_cpu_ram(mv_vm)\n if static_ip:\n vc_swc.configure_static_ip(mv_vm, static_ip)\n # Clone the first disk of the encrypted guest VM and attach the\n # clone to the MV VM.\n log.info(\"Cloning guest root disk\")\n guest_root_disk = vc_swc.get_disk(guest_vm, unit_number=0)\n guest_root_disk_name = vc_swc.get_disk_name(guest_root_disk)\n new_root_disk_name = vc_swc.get_session_vmdk_name(guest_root_disk_name)\n vc_swc.clone_disk(source_disk=guest_root_disk,\n dest_disk_name=new_root_disk_name)\n vc_swc.add_disk(mv_vm, filename=new_root_disk_name, unit_number=1)\n # Power on the MV VM and wait for encryption\n vc_swc.power_on(mv_vm)\n # Send user data\n vc_swc.send_userdata(mv_vm, user_data_str)\n ip_addr = vc_swc.get_ip_address(mv_vm)\n log.info(\"MV VM ip address is %s\", ip_addr)\n # wait for encryption to complete\n host_ips = [ip_addr]\n enc_svc = enc_svc_cls(host_ips, port=values.status_port)\n log.info('Waiting for updater service on port %s on %s',\n enc_svc.port, ', '.join(host_ips))\n wait_for_encryptor_up(enc_svc, Deadline(600))\n try:\n wait_for_encryption(enc_svc)\n except Exception as e:\n log.exception(\"Update failed with error %s\", e)\n raise\n\n single_disk = encryptor_did_single_disk(enc_svc)\n\n # Power off the MV VM\n vc_swc.power_off(mv_vm)\n\n # Create final disk attachment\n if single_disk:\n guest_root_disk = vc_swc.detach_disk(guest_vm, unit_number=0)\n new_root_disk = vc_swc.detach_disk(mv_vm, unit_number=1)\n vc_swc.add_disk(guest_vm,\n filename=vc_swc.get_disk_name(new_root_disk),\n unit_number=0)\n else:\n guest_old_disk = vc_swc.detach_disk(guest_vm, unit_number=1)\n mv_old_disk = vc_swc.detach_disk(guest_vm, unit_number=0)\n # Clone and attach new MV disk to guest VM\n log.info(\"Cloning Metavisor disk\")\n new_disk = vc_swc.get_disk(mv_vm, unit_number=0)\n u_disk_name = vc_swc.clone_disk(source_disk=new_disk,\n dest_disk=mv_old_disk)\n # Add disks to guest VM\n vc_swc.add_disk(guest_vm, filename=u_disk_name, unit_number=0)\n vc_swc.add_disk(guest_vm,\n filename=vc_swc.get_disk_name(guest_old_disk),\n unit_number=1)\n vc_swc.delete_disk(new_root_disk_name)\n new_root_disk_name = None\n\n # Create images\n if values.encrypted_ovf_name:\n log.info(\"Creating images\")\n if values.target_path is None:\n raise Exception(\"Cannot create ova/ovf as target path is None\")\n if values.create_ova:\n # delete the old mf file\n os.remove(os.path.join(values.target_path,\n values.encrypted_ovf_name + \".mf\"))\n # import the new OVF\n ovf = vc_swc.export_to_ovf(guest_vm, values.target_path,\n ovf_name=values.encrypted_ovf_name)\n if values.create_ova:\n if values.ovftool_path is not None:\n # delete the old ova\n os.remove(os.path.join(values.target_path,\n values.encrypted_ovf_name + \".ova\"))\n ova = vc_swc.convert_ovf_to_ova(values.ovftool_path, ovf)\n print(ova)\n else:\n print(ovf)\n else:\n # delete the old vm template\n template_vm = 
vc_swc.find_vm(values.template_vm_name)\n old_template_vm_name = None\n if template_vm:\n old_template_vm_name = values.template_vm_name + \"-\" + \\\n vc_swc.session_id\n log.info(\"Renaming the old template to %s\",\n old_template_vm_name)\n try:\n vc_swc.rename_vm(template_vm, old_template_vm_name)\n except Exception as e:\n if \"vim.fault.FileFault\" not in str(e):\n raise\n log.info(\"Rename VM not supported. \"\n \"Deleting the old template %s.\", template_vm.name)\n if not vc_swc.find_vm(values.template_vm_name):\n vc_swc.change_vm_name(template_vm,\n values.template_vm_name)\n vc_swc.destroy_vm(template_vm)\n # clone the vm to create template\n log.info(\"Creating the template VM\")\n template_vm = vc_swc.clone_vm(guest_vm,\n vm_name=values.template_vm_name,\n template=True)\n print(vc_swc.get_vm_name(template_vm))\n if old_template_vm_name:\n old_template = vc_swc.find_vm(old_template_vm_name)\n if old_template:\n log.info(\"Deleting the old template\")\n vc_swc.destroy_vm(old_template)\n except Exception as e:\n log.exception(\"Failed to update the image with error %s\", e)\n if new_root_disk_name:\n vc_swc.delete_disk(new_root_disk_name)\n raise\n finally:\n vc_swc.destroy_vm(guest_vm)\n vc_swc.destroy_vm(mv_vm)\n log.info(\"Done\")\n\n\ndef launch_guest_vm(vc_swc, values):\n log.info(\"Launching encrypted guest VM\")\n if values.template_vm_name:\n template_vm = vc_swc.find_vm(values.template_vm_name)\n vm = vc_swc.clone_vm(template_vm)\n elif values.encrypted_ovf_name:\n if values.create_ova:\n ova = os.path.join(values.target_path,\n values.encrypted_ovf_name + \".ova\")\n vc_swc.convert_ova_to_ovf(values.ovftool_path, ova)\n vm = vc_swc.upload_ovf_to_vcenter(values.target_path,\n values.encrypted_ovf_name + \".ovf\",\n validate_mf=False)\n else:\n log.error(\"Cannot launch guest VM without template VM/OVF/OVA\")\n vm = None\n return vm\n\n\ndef update_from_s3(vc_swc, enc_svc_cls, values, download_file_list=None,\n user_data_str=None, static_ip=None):\n guest_vm = None\n mv_vm = None\n try:\n guest_vm = launch_guest_vm(vc_swc, values)\n except Exception as e:\n log.exception(\"Failed to lauch guest VM (%s)\", e)\n if (guest_vm is not None):\n vc_swc.destroy_vm(guest_vm)\n raise\n try:\n if values.source_image_path is None or download_file_list is None:\n log.error(\"Cannot get metavisor OVF from S3\")\n raise Exception(\"Invalid MV OVF\")\n mv_vm = launch_mv_vm_from_s3(vc_swc, values.source_image_path,\n download_file_list,\n vm_name=None, cleanup=values.cleanup)\n except Exception as e:\n log.exception(\"Failed to launch metavisor OVF from S3 (%s)\", e)\n if (mv_vm is not None):\n vc_swc.destroy_vm(mv_vm)\n if (guest_vm is not None):\n vc_swc.destroy_vm(guest_vm)\n raise\n update_ovf_image_mv_vm(vc_swc, enc_svc_cls, values, guest_vm, mv_vm,\n user_data_str, static_ip)\n\n\ndef update_from_local_ovf(vc_swc, enc_svc_cls, values, user_data_str=None,\n static_ip=None):\n guest_vm = None\n mv_vm = None\n if values.source_image_path is None or values.image_name is None:\n log.error(\"Metavisor OVF path needs to be specified\")\n return\n try:\n guest_vm = launch_guest_vm(vc_swc, values)\n except Exception as e:\n log.exception(\"Failed to lauch guest VM (%s)\", e)\n if (guest_vm is not None):\n vc_swc.destroy_vm(guest_vm)\n raise\n try:\n log.info(\"Launching MV VM from local OVF\")\n validate_local_mv_ovf(values.source_image_path, values.image_name)\n mv_vm = vc_swc.upload_ovf_to_vcenter(values.source_image_path,\n values.image_name)\n except Exception as e:\n 
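# tear down whichever VMs were already created before re-raising\n        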
log.exception(\"Failed to launch from metavisor OVF (%s)\", e)\n if (mv_vm is not None):\n vc_swc.destroy_vm(mv_vm)\n if (guest_vm is not None):\n vc_swc.destroy_vm(guest_vm)\n raise\n update_ovf_image_mv_vm(vc_swc, enc_svc_cls, values, guest_vm, mv_vm,\n user_data_str, static_ip)\n\n\ndef update_from_vmdk(vc_swc, enc_svc_cls, values, user_data_str=None,\n static_ip=None):\n guest_vm = None\n mv_vm = None\n if values.encryptor_vmdk is None:\n log.error(\"Metavisor VMDK is not specified\")\n return\n try:\n guest_vm = launch_guest_vm(vc_swc, values)\n except Exception as e:\n log.exception(\"Failed to lauch guest VM (%s)\", e)\n if (guest_vm is not None):\n vc_swc.destroy_vm(guest_vm)\n raise\n try:\n # Add datastore path to the vmdk\n metavisor_vmdk_path = vc_swc.get_datastore_path(values.encryptor_vmdk)\n # Create a metavisor VM\n vm = vc_swc.create_vm()\n # Attach metavisor vmdk as root disk\n vc_swc.add_disk(vm, filename=metavisor_vmdk_path, unit_number=0)\n except Exception as e:\n log.exception(\"Failed to launch metavisor VMDK (%s)\", e)\n if (mv_vm is not None):\n vc_swc.destroy_vm(mv_vm)\n if (guest_vm is not None):\n vc_swc.destroy_vm(guest_vm)\n raise\n update_ovf_image_mv_vm(vc_swc, enc_svc_cls, values, guest_vm, mv_vm,\n user_data_str, static_ip)\n","sub_path":"brkt_cli/esx/update_vmdk.py","file_name":"update_vmdk.py","file_ext":"py","file_size_in_byte":11072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"420036473","text":"import vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType, VkLongpollMode\nfrom vk_api.utils import get_random_id\n\ndef main():\n vk_session = vk_api.VkApi(token=TOKEN_vk)\n vk = vk_session.get_api()\n longpoll = VkLongPoll(vk_session)\n\n for event in longpoll.listen():\n if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:\n vk.messages.send(\n user_id=event.user_id,\n random_id=get_random_id(),\n message='nothing'\n )\n print('{} sent text message: {}'.format(event.user_id, event.text))\n elif event.type == VkEventType.MESSAGE_NEW and event.to_me:\n if event.raw[7]['attach1_type'] == 'photo':\n\n print(vk.messages.getHistoryAttachments(peer_id=event.user_id,\n media_type='photo',\n start_from=0,\n count=1,\n photo_sizes=0,\n preserve_order=1\n ).get('items')[0]['attachment']['photo']['sizes'][-1]['url'])\n # ['items'][0]['attachment']['photo']['sizes'][3]['url']\n\n\nif __name__ == '__main__':\n main()","sub_path":"test_smth.py","file_name":"test_smth.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"76469777","text":"import random\n\n# класс игрового корабля.\nclass Ship:\n\n def __init__(self, x, y, decks, degree = 1):\n self.x = x\n self.y = y\n self.decks = decks\n self.life = decks\n self.degree = degree\n self.is_dead = False\n\n\n def get_status(self):\n return self.is_dead\n\n def shot(self):\n self.life -= 1\n if self.life == 0:\n self.is_dead = True\n return True\n\n @property\n def positions(self):\n self.cells = []\n if self.degree:\n for i in range(self.decks):\n self.cells.append((self.x+i, self.y))\n else:\n for i in range(self.decks):\n self.cells.append((self.x, self.y+i))\n return self.cells\n\n\n# Игровая доска. 
Для игры их создается две: под компьютер и под игрока.\nclass Board:\n\n def __init__(self, name):\n self.board = [[i for i in range(1,7)]] + [[0 for i in range(6)] for i in range(6)]\n self.name = name\n self.all_ships = []\n self.shots = None\n self.forbid_turns = []\n self.problem = []\n\n\n # метод проверки есть ли еще живые корабли - если нет, то у нас есть победитель\n def get_winner(self):\n for ship in self.all_ships:\n if not ship.get_status():\n print('\\nВ море еще остались враги! Покажем им в следующем раунде!\\n***********\\n\\n')\n return False\n print(f'\\n{self.name} побеждает в этом раунде! Поздравляю!\\n***********\\n\\n')\n return True\n\n\n # метод вывода текущего состояния доски для игры\n def print_board(self):\n for index, line in enumerate(self.board):\n line = list(map(str, line))\n print(index, ' '.join(line))\n\n\n # метод для показа текущего расположения кораблей и выстрелов - для тестирования\n # данный метод планировалось использовать вместо show_ships, однако этот метод меняет аттрибут self.board\n # на момент сдачи работы решить из-за чего возникает проблема не удалось\n def show_ships_test(self):\n new_board = self.board.copy()\n for ship in self.all_ships:\n for position in ship.positions:\n x, y = position\n if new_board[x][y] != 'x' and new_board[x][y] != '■':\n new_board[x][y] = '■'\n for index, line in enumerate(new_board):\n line = list(map(str, line))\n print(index, ' '.join(line))\n\n\n # метод для показа текущего расположения кораблей игроку, чтобы было понимание куда можно размещать, а куда нет\n # изначально планировалось использовать для этой цели show_ships_test(self)\n def show_ships(self):\n new_board = [[i for i in range(1,7)]] + [[0 for i in range(6)] for i in range(6)]\n for ship in self.all_ships:\n for position in ship.positions:\n x, y = position\n new_board[x][y] = '■'\n for index, line in enumerate(new_board):\n line = list(map(str, line))\n print(index, ' '.join(line))\n\n\n # метод для проверки на возможность разместить корабль в данной точке\n def check_empty(self, cell):\n if self.all_ships is None:\n return True\n else:\n for ship in self.all_ships:\n if cell in ship.positions:\n return False\n else:\n if cell in self.forbid_turns:\n return False\n return True\n\n\n # метод проверки что координата находится в рамках игрового поля\n def check_in_field(self, cell):\n x,y = cell\n if x in range(1,len(self.board)) and y in range(0,len(self.board[0])):\n return True\n else:\n return False\n\n\n # метод генераниции клеток в которые запрещено ставить корабли\n # берет клетку создаваемого коробля и в пишет в запретные клетки все клетки вокруг нее в радиусе 1\n def gener_forbid_turns(self, cell):\n x,y = cell\n for row in range(-1, 2):\n for col in range(-1, 2):\n if x + row > 0 and y + col >= 0:\n if (x + row, y + col) not in self.forbid_turns \\\n and (x + row, y + col) != (x,y):\n self.forbid_turns.append((x + row, y + col))\n\n\n # метод размещения кораблей на полей\n def set_ship(self,*args):\n new_ship = Ship(*args)\n go = []\n for cell in new_ship.positions:\n # проверяем, чтобы каждая клетка была свободна от другого корабля, клетка не находилась в списке\n # запретных клеток (правило - миниму 1 клетка от соседнего корабля), а также клетка корабля должна\n # находится в рамках текущего игрового поля\n result = self.check_empty(cell) and self.check_in_field(cell)\n go.append(result)\n\n if all(go):\n if self.all_ships is None:\n self.all_ships = []\n self.all_ships.append(new_ship)\n for i, cell in enumerate(new_ship.positions):\n 
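# mark the one-cell buffer zone around every deck so later ships keep their distance\n                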
self.gener_forbid_turns(cell)\n return True\n else:\n return False\n\n\n # метод выделения подбитого корабля. Если корабль уничтожен, то все клетки в радиусе 1 меняют внешний вид\n # для обозначения контура подбитого корабля и знак игроку, что стрелять туда не имеет смысла\n def ship_dead(self, positions):\n for cell in positions:\n x,y = cell\n for row in range(-1, 2):\n for col in range(-1, 2):\n if x + row in range(1, len(self.board)) and y + col in range(0,len(self.board[0]))\\\n and ((x + row, y + col) != cell and self.board[x + row][y + col] != 'x' and self.board[x + row][y + col] != '■'):\n self.board[x + row][y + col] = '•'\n\n\n # метод общения игрока с программой - фактически просит ввести координаты и проверяет ввод является числами\n # и эти числа находятся в рамках игрового поля\n def user_choice(self):\n text = f'Ваши координаты должны быть целыми числами от 1 до {len(self.board[0])}.'\n while True:\n x = input ('\\nВыберите ряд - ')\n try:\n x = int(x)\n if x not in range(1,len(self.board)):\n print(text)\n else:\n break\n except ValueError:\n print(text)\n while True:\n y = input('Выберите колонку - ')\n try:\n y = int(y) - 1\n if y < 0 or y > len(self.board[0]):\n print(text)\n else:\n break\n except ValueError:\n print(text)\n return x,y\n\n\n # метод - выстрел человека\n def shot_user(self):\n while True:\n print(f'{self.name} приготовиться к атаке! Вывожу последние данные:')\n self.print_board()\n x,y = self.user_choice()\n go = self.shot(x,y)\n if go:\n break\n print('Ой! Мы уже стреляли в эту клетку! Надо выбрать другую.\\n')\n\n\n # обработка выстрела - получаем координаты от игрока или компьютера и обрабатываем их\n def shot(self, x, y):\n x,y = x,y\n if self.shots is None:\n self.shots = []\n if (x, y) in self.shots:\n return False\n for ship in self.all_ships:\n if (x, y) in ship.positions:\n print('Попадание! Так держать коммандор!\\n')\n self.board[x][y] = 'x'\n ship.shot()\n if ship.get_status():\n self.ship_dead(ship.positions)\n print('Корабль врага повержен!\\n')\n self.shots.append((x, y))\n return True\n print('Мимо! В следующий раз точно повезет!\\n')\n self.board[x][y] = '•'\n self.shots.append((x, y))\n return True\n\n\n # метод - выстрел компьютера\n def shot_ai(self):\n while True:\n x,y = (random.randint(1, 6),random.randint(0, 5))\n if self.shots is None:\n self.shots = []\n if (x,y) not in self.shots:\n break\n print(f'{self.name} выбирает для выстрела клетку - {x},{y + 1}!')\n self.shot(x,y)\n\n\n # метод-генерация доски игроком\n def set_user_deck(self):\n decks = (1,1,1,3,)\n for i, deck in enumerate(decks):\n while True:\n print(f'Размещаем корабль {i + 1}. Количество палуб - {deck}.')\n self.show_ships()\n degree = 1\n if deck > 1:\n while True:\n degree = input('''Это большой корабль! Выберите положение для корабля:\n1 - следующие палубы пойдут от стартовой координаты вниз.\n0 - следующие палубы пойдут от стартовой координаты вправо.\nВы выбираете - ''')\n try:\n degree = int(degree)\n if degree in range(0,2):\n break\n else:\n print('Ошибка! Вы должны напечатать 1 или 0. Попробуйте снова.')\n except ValueError:\n print('Ошибка! Вы должны напечатать 1 или 0. Попробуйте снова.')\n x,y = self.user_choice()\n if self.set_ship(x, y, deck, degree):\n print('Отлично! Корабль размещен!\\n')\n break\n else:\n print('Командор! У нас ошибка: эта зона занята другим кораблем. Попробуем разместить заново.')\n print('Отлично! Все корабли на месте. 
Начинаем игру.')\n\n\n    # method that generates the computer's board\n    def set_ai_deck(self):\n        decks = (1,1,1,3,)\n        for deck in decks:\n            while True:\n                x,y = random.randint(1,6), random.randint(0,5)\n                degree = random.randint(0,1)\n                go = self.set_ship(x,y,deck,degree)\n                if go:\n                    break\n\n\n# this class is responsible for launching the game and for its modes: AI vs AI (for hunting down problems), and computer\n# vs human. In principle, using this controller and the Board class one could also write a two-human-player variant\nclass GameContoller:\n    def __init__(self):\n        self.comp_1 = 'Comp 1'\n        self.comp_2 = 'Comp 2'\n\n    # turn randomizer for the Computer vs Computer test\n    def who_is_first_ai(self, deck_1, deck_2):\n        print(f'Определяем кто начинает партию: {deck_1.name} или {deck_2.name}.')\n        turn = random.randint(0,1)\n        if turn:\n            print(f'Первым ходит {deck_1.name}')\n            return deck_1, deck_2\n        print(f'Первым ходит {deck_2.name}')\n        return deck_2, deck_1\n\n    # set the player's name for the Computer vs Player game type\n    def set_user(self):\n        self.user = input('Добро пожаловать в игру!\\nНазовите свое имя: ')\n\n    # test run of the game between two computers\n    def ai_game(self):\n        board_1, board_2 = Board(self.comp_1), Board(self.comp_2)\n        print(f'Добро пожаловать в игру - {board_1.name}, {board_2.name} ')\n        board_1, board_2 = self.who_is_first_ai(board_1, board_2)\n        print('Размещаем свои корабли!')\n        board_1.set_ai_deck()\n        board_2.set_ai_deck()\n        while True:\n            print(f'Ход игрока {board_1.name}')\n            board_1.print_board()\n            print()\n            board_1.shot_ai()\n            board_1.print_board()\n            game_over = input('Едем дальше? - ')\n            if game_over:\n                break\n            game_over = board_1.get_winner()\n            if game_over:\n                break\n            board_1, board_2 = board_2, board_1\n\n    # method that runs a game between the computer and a human\n    # it came out a bit overloaded - there was no time to rewrite it properly\n    def game(self):\n        self.set_user()\n        userBoard, aiBoard = Board(self.user), Board(self.comp_1)\n        userBoard.set_ai_deck()\n        aiBoard.set_user_deck()\n        user_start = random.randint(0,1)\n\n        print('\\nВеликий рандом решил, что человек будет ходить первым!') if user_start else print('\\nВеликий рандом решил, что компьютер будет ходить первым!')\n        while True:\n            if user_start:\n                userBoard.shot_user()\n                print('Вывожу результат:')\n                userBoard.print_board()\n                if userBoard.get_winner():\n                    break\n\n                aiBoard.shot_ai()\n                print('Вывожу результат:')\n                aiBoard.print_board()\n                if aiBoard.get_winner():\n                    break\n            else:\n                aiBoard.shot_ai()\n                print('Вывожу результат:')\n                aiBoard.print_board()\n                if aiBoard.get_winner():\n                    break\n                userBoard.shot_user()\n                print('Вывожу результат:')\n                userBoard.print_board()\n                if userBoard.get_winner():\n                    break\n\ngame = GameContoller()\ngame.game()","sub_path":"warships/warsClasse.py","file_name":"warsClasse.py","file_ext":"py","file_size_in_byte":15922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"61689411","text":"# fuzzyplotter.py\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom fuzzyset import FuzzySet\nfrom fuzzyvariable import FuzzyVariable\nfrom fuzzifier import FuzzyDataset\n\nclass FuzzyPlotter(object):\n\n    def __init__(self, fuzzy_obj, xlabel=None, ylabel=None):\n        self.xlabel = xlabel\n        self.ylabel = ylabel\n        if isinstance(fuzzy_obj, list):\n            self.fuzzy_obj = fuzzy_obj\n            self._plot_list()\n        elif isinstance(fuzzy_obj, FuzzyVariable):\n            self.fuzzy_obj = fuzzy_obj\n            self._plot_fuzzyvar()\n        elif isinstance(fuzzy_obj, FuzzySet):\n            self.fuzzy_obj = fuzzy_obj\n            
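# a bare FuzzySet: draw its single membership curve\n            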
self._plot_fuzzyset()\n elif isinstance(fuzzy_obj, FuzzyDataset):\n self.fuzzy_obj = fuzzy_obj\n self._plot_fuzzydata()\n \n## def __call__(self):\n## plt.figure()\n## plt.title('Fuzzy Set')\n## for fuzz in self.fuzzy_obj:\n## plt.plot(fuzz.x, fuzz.m)\n## plt.xlabel('Variable')\n## plt.ylabel('Membership Degree')\n## plt.show()\n\n def _plot_list(self):\n plt.figure()\n plt.title('Fuzzy Set')\n for i, fuzz in enumerate(self.fuzzy_obj):\n plt.plot(fuzz.x, fuzz.m)\n plt.xlim(fuzz.x[0] - 5e-1, fuzz.x[-1] + 5e-1)\n plt.ylim(0, 1.2)\n plt.xlabel('X')\n plt.ylabel('μ(x)')\n plt.show()\n\n def _plot_fuzzyset(self, obj=None):\n plt.figure()\n plt.title('Fuzzy Set')\n plt.plot(self.fuzzy_obj.x, self.fuzzy_obj.m)\n plt.xlim(self.fuzzy_obj.x[0] - 5e-1, self.fuzzy_obj.x[-1] + 5e-1)\n plt.ylim(0, 1.2)\n plt.xlabel('X')\n plt.ylabel('μ(x)')\n plt.show()\n \n def _plot_fuzzyvar(self, obj=None):\n x = self.fuzzy_obj.x\n name = self.fuzzy_obj.name\n fuzz = self.fuzzy_obj.fuzzy\n terms = self.fuzzy_obj.terms\n fig = plt.figure()\n plt.title('{}'.format(name))\n for i, f in enumerate(fuzz):\n plt.plot(x, f.func(x), label=terms[i])\n if self.xlabel is None:\n plt.xlabel('Universe of Discourse')\n else:\n plt.xlabel(self.xlabel)\n if self.ylabel is None:\n plt.ylabel('Membership Degree')\n else:\n plt.ylabel(self.ylabel)\n plt.legend()\n plt.show()\n\n def _plot_fuzzydata(self):\n for i, fvar in enumerate(self.fuzzy_obj.fuzzy_variables.values()):\n plt.figure()\n plt.title('{}'.format(fvar.name))\n x = fvar.x\n terms = fvar.terms\n for j, fuzz in enumerate(fvar.fuzzy):\n s = 'fuzz.func.{}(x, fuzz.prmts)'.format(fuzz.mfunc)\n plt.plot(x, eval(s), label=terms[j])\n plt.xlim(x[0],x[-1])\n plt.ylim(0, 1.2)\n plt.xlabel('{}'.format(fvar.name))\n plt.ylabel('μ(x)')\n plt.legend()\n #plt.show()\n plt.savefig('{}'.format(fvar.name))\n \n## n = len(self.fuzzy_obj)-1\n## fig, axes = plt.subplots(n//2+1, 2, figsize=(16, 16), sharey=True)\n## for i, ax in enumerate(axes.flatten()):\n## try:\n## fvar = self.fuzzy_obj.fuzzy_variables[i]\n## x = fvar.x\n## name = fvar.name\n## fuzz = fvar.fuzzy\n## terms = fvar.terms\n## ax.set_title('{}'.format(name), fontsize=9)\n## for i, f in enumerate(fuzz):\n## s = 'f.func.{}(x, f.prmts)'.format(f.mfunc)\n## ax.plot(x, eval(s), label=terms[i])\n## ax.legend(fontsize=6)\n## ax.set_ylabel('μ(x)')\n## except KeyError:\n## fvar = self.fuzzy_obj.fuzzy_variables[-1]\n## x = fvar.x\n## name = fvar.name\n## fuzz = fvar.fuzzy\n## terms = fvar.terms\n## ax.set_title('{}'.format(name), fontsize=9)\n## for i, f in enumerate(fuzz):\n## s = 'f.func.{}(x, f.prmts)'.format(f.mfunc)\n## ax.plot(x, eval(s), label=terms[i])\n## ax.legend(fontsize=6)\n## ax.set_ylabel('μ(x)')\n## plt.show()\n## plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.4) \n## plt.show()\n\n\n### Testing/Debbuging \n##uod = np.arange(-10,50,0.1)\n##A = FuzzySet(uod)\n##A.set_mf('trimf', [-5,2,12])\n##B = FuzzySet(uod)\n##B.set_mf('trapmf', [8,14,22,28])\n##C = FuzzySet(uod)\n##C.set_mf('gaussmf', [30,4])\n##t = ['Low', 'Average', 'High']\n##fv = FuzzyVariable('Temperature', uod, t)\n##fv.setfuzzy([A,B,C])\n##plot = FuzzyPlotter(fv)\n\n","sub_path":"fdt-o3/fuzzyplotter.py","file_name":"fuzzyplotter.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"596405517","text":"mem=int(input('enter total memory available '))\nblocksize=int(input('enter block size '))\nprocesses=int(input('enter number of processes '))\n\nreq=[]\nfor i in 
range(processes):\n req.append(int(input('memory required by processes ')))\n\nblocks=int(input(\"enter number of blocks available \"))\nint_frag=0\next_frag=0\nprint(\"Process\\tmem req\\tmem alloc\\tint_frag\")\nj=i=0\nwhile(i\n self.live_markets = [] # list of markets to be processed\n self._setup()\n logger.info('Recorder created %s' % self.stream_id)\n\n def __call__(self, market_books, publish_time):\n \"\"\"Checks market using market book parameters\n function then passes market_book to be processed.\n\n :param market_books: List of Market Book objects\n :param publish_time: Publish time of market book\n \"\"\"\n for market_book in market_books:\n market_id = market_book.get('id')\n self.check_market_book(market_id, market_book)\n if market_id in self.live_markets:\n self.process_market_book(market_book, publish_time)\n\n def check_market_book(self, market_id, market_book):\n \"\"\"Logic used to decide if market_book should\n be processed\n\n :param market_id: Market id\n :param market_book: Market Book object\n \"\"\"\n if market_id not in self.live_markets:\n self.live_markets.append(market_id)\n\n def process_market_book(self, market_book, publish_time):\n \"\"\"Function that processes market book\n\n :param market_book: Market Book object\n :param publish_time: Publish time of market book\n \"\"\"\n raise NotImplementedError\n\n def on_market_closed(self, market_book):\n \"\"\"Function run when market is closed, this\n may execute more than once if update received\n after being closed.\n \"\"\"\n market_id = market_book.get('id')\n market_definition = market_book.get('marketDefinition')\n logger.info('Closing market %s' % market_id)\n self.storage_engine(market_id, market_definition, self.stream_id)\n\n def _setup(self):\n \"\"\"Create stream folder in /tmp # todo\n \"\"\"\n directory = os.path.join('/tmp', self.stream_id)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n def __str__(self):\n return '<%s>' % self.NAME\n\n\nclass StreamRecorder(BaseRecorder):\n \"\"\"Data recorder, records stream data\n to /tmp/market_id, a single market per\n file.\n \"\"\"\n\n NAME = 'STREAM_RECORDER'\n\n def process_market_book(self, market_book, publish_time):\n filename = '%s' % market_book.get('id')\n file_directory = os.path.join('/tmp', self.stream_id, filename)\n\n with open(file_directory, 'a') as outfile:\n outfile.write(\n json.dumps({\n \"op\": \"mcm\",\n \"clk\": None,\n \"pt\": publish_time,\n \"mc\": [market_book]\n }) + '\\n'\n )\n\n if 'marketDefinition' in market_book and market_book['marketDefinition']['status'] == 'CLOSED':\n self.on_market_closed(market_book)\n","sub_path":"flumine/resources/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"41936885","text":"#run_crustal_rupture_default.py\n# import json\n# import git\n# import csv\nimport os\nfrom pathlib import PurePath\nfrom py4j.java_gateway import JavaGateway\nimport datetime as dt\nfrom dateutil.tz import tzutc\n\n\ndef run_task(builder,\n crustal_filename, filekey,\n ddw, distance, max_cumulative_azimuth, min_sub_sects_per_parent,\n strategy, thinning_factor):\n t0 = dt.datetime.utcnow()\n outputfile = output_folder.joinpath(\"ruptset_DEPTH30_ddw%s_jump%s_%s_%s_%s_%s_thin%s.zip\" % (ddw,\n distance, filekey, max_cumulative_azimuth, min_sub_sects_per_parent, strategy, thinning_factor))\n\n print(\"building %s started at %s\" % (outputfile, 
dt.datetime.utcnow().isoformat()), end=' ')\n\n    # Run the task....\n    builder\\\n        .setMaxJumpDistance(distance)\\\n        .setPermutationStrategy(strategy)\\\n        .setMaxSubSectionLength(ddw)\\\n        .setMinSubSectsPerParent(min_sub_sects_per_parent)\\\n        .setMaxCumulativeAzimuthChange(max_cumulative_azimuth)\\\n        .setFaultModelFile(crustal_filename)\\\n        .setThinningFactor(thinning_factor)\n\n    builder.buildRuptureSet()\n\n    #capture task metrics\n    #duration = (dt.datetime.utcnow() - t0).total_seconds()\n    # metrics = ruptureSetMetrics(builder)\n\n    #create the output dataset\n    builder.writeRuptureSet(str(outputfile))\n    print(\"; took %s secs\" % (dt.datetime.utcnow() - t0).total_seconds())\n\nif __name__ == \"__main__\":\n\n    #setup the java gateway binding\n    gateway = JavaGateway()\n    app = gateway.entry_point\n    builder = app.getBuilder()\n\n    #get the root path for the task local data\n    root_folder = PurePath(os.getcwd())\n\n    repos = [\"opensha-ucerf3\", \"opensha-commons\", \"opensha-core\", \"nshm-nz-opensha\"]\n    #repo_root = root_folder\n    output_folder = root_folder.joinpath('tmp').joinpath(dt.datetime.utcnow().isoformat().replace(':','-'))\n    os.mkdir(output_folder)\n\n    ##Test parameters\n    crustal_filename = str(root_folder.joinpath(\"nshm-nz-opensha/data/FaultModels/SANSTVZ2_crustal_opensha.xml\"))\n    filekey = \"SANS_TVZ2\"\n    strategy = 'UCERF3' #, ] #'POINTS'] #, 'UCERF3' == DOWNDIP]\n    distance = 5.0 #, 5.1, 5.2, 5.3]\n    ddw = 0.5 #, 1.5, 2.0, 2.5]\n    min_sub_sects_per_parent = 2 #,3,4]\n    max_cumulative_azimuth = 580.0 #, 600.0]\n    thinning_factor = 0.0 #.075 #, 0.2, 0.0]\n\n    #test the tests, normally 1000 for NZ CFM\n    max_sections = 1000\n\n    #Run the task....\n    run_task(builder, crustal_filename, filekey,\n        ddw, distance, max_cumulative_azimuth, min_sub_sects_per_parent,\n        strategy, thinning_factor)\n\n    print(\"Done!\")\n","sub_path":"src/python/automation/arkiv/run_example_crustal_rupture.py","file_name":"run_example_crustal_rupture.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"132861010","text":"import sqlite3\n\nconn = sqlite3.connect('music.db')\n\nc = conn.cursor()\n\nf = open('fisier.txt') #name of the file into which you extracted the data\n\n#read each line from the file, split it on a unique separator, process the resulting words, then insert them into the DB\n\nfor line in f:\n    words = line.split('separator') #replace with the correct symbol\n\n    #operations on the words\n\n\n#after processing the data, build a new file with all the complete data for the database, in this order:\n#artist, songTitle, genre, musicData\n#where you do not have the information, e.g. lyrics, put 0 so that each line you build in the file has all the data needed for insertion\n\nfile = open ('the new file with all the information')\nfor line in file:\n    c.execute('insert into music values (?,?,?,?)', line)\n\nconn.commit()","sub_path":"Database/DatabaseInsert.py","file_name":"DatabaseInsert.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"642502982","text":"from django.conf.urls import patterns, url\n#from django.conf.urls import include\n#from .views import index, index2\n\nurlpatterns = patterns('',\n\t#url(r'^$', 'apps.inicio.views.index'),\n\t#url(r'^$', index.as_view()),\n\n\turl(r'^$', 'django.contrib.auth.views.login',\n\t\t{'template_name':'inicio/index.html'}, name = 'login'),\n\n\turl(r'^cerrar/$', 
'django.contrib.auth.views.logout_then_login',\n\t\tname = 'logout'),\n)","sub_path":"SistemaSH/apps/inicio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"477076498","text":"__author__ = 'jeroendevries'\nbedrag = 4356\ndef mensen():\n    global men\n    print(\"How many people are coming along?\");men = input()\n\ndef kosten_pp(n):\n    \"\"\"\n    Calculates the cost per person\n    :arg : the number of people\n    :return:\n    Cost per person\n    \"\"\"\n\n    if int(men) == 0:\n        print(\"Dividing by 0 is not allowed!\")\n    else:\n        try:\n            kosten = bedrag / int(men)\n            return kosten\n        except:\n            print(\"Unexpected error\")\n\nmensen()\nprint(kosten_pp(men))","sub_path":"Week_3_College_1/3.2.1 Exceptions stap 1.py","file_name":"3.2.1 Exceptions stap 1.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"270191504","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    Dealer\n    ======\n\n    Dealer tools for watching SCM.\n\n\"\"\"\n\n__version__ = '0.1.8'\n__project__ = __name__\n__author__ = \"Kirill Klenov \"\n__license__ = \"BSD\"\n\n\ndef get_backend(name, **kwargs):\n    \" Create backend by name. \"\n\n    from importlib import import_module\n\n    mod = import_module(__name__ + '.' + name)\n    return mod.Backend(**kwargs)\n","sub_path":"dealer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"44129560","text":"#coding=utf-8\nimport requests\n\ndef test():\n    url = \"http://biz-test.jiutongpay.com.cn\"\n    headers = {\n        \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'\n    }\n\n    s = requests.session()\n    r = s.get(url=url,headers=headers,verify=False)\n    print(s.cookies)\n    c = requests.cookies\n\nif __name__ == \"__main__\":\n    test()","sub_path":"requestss/jt_requests.py","file_name":"jt_requests.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"21892170","text":"from sklearn.datasets import fetch_20newsgroups\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\ndef nb_news():\n    \"\"\"\nText classification with naive Bayes\n    :return: None\n    \"\"\"\n    # fetch the data\n    news = fetch_20newsgroups(subset=\"all\")\n    # data handling -- split the dataset\n    x_train,x_test,y_train,y_test = train_test_split(news.data,news.target)\n    # feature engineering -- text feature extraction (tfidf)\n    transfer = TfidfVectorizer()\n    x_train = transfer.fit_transform(x_train)\n    x_test = transfer.transform(x_test)\n    # naive Bayes estimator workflow\n    estimator = MultinomialNB()\n    estimator.fit(x_train,y_train)\n    # model evaluation\n    #method 1: directly compare predicted and true values\n    y_predict = estimator.predict(x_test)\n    print(\"predicted values:\",y_predict)\n    print(\"comparison of predicted and true values:\",y_predict==y_test)\n    #method 2: judge by the accuracy score\n    print(estimator.score(x_test,y_test))\n    return None\nif __name__ == \"__main__\":\n    nb_news()","sub_path":"day_03_bayes.py","file_name":"day_03_bayes.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"59760744","text":"import collections\nimport queue\n\n\n# First-in, first-out queue\nq = queue.Queue()\n\n# Last-in, first-out queue (the last item put in is the first one taken out)\nlq = queue.LifoQueue()\n\n# list\nl = list()\n\n# 
deque is faster than a list, so it is better to use deque when handling data as a queue or stack\nd = collections.deque()\n\n\nfor i in range(3):\n    q.put(i)\n    lq.put(i)\n    l.append(i)\n    d.append(i)\n\nfor _ in range(3):\n    print('FIFO : {}'.format(q.get()))\n    print('LIFO : {}'.format(lq.get()))\n    print('LIST : {}'.format(l.pop(0)))\n    print('DEQUE: {}'.format(d.popleft()))\n","sub_path":"lesson/18/229.py","file_name":"229.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"103024849","text":"import sys\nimport io\n\n# Get input file via arguments\nfileName = sys.argv[1]\n\n# Create data structures needed\n\n\n# Open input file for reading\ninText = open(fileName, 'r')\n\n# Main loop for reading line by line\nwhile True:\n    # currentLine is the line read by the interpreter\n    currentLine = inText.readline()\n\n    # Parse commands and process them\n\n    # End of file\n    if not currentLine:\n        exit(0)\n","sub_path":"assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"588250617","text":"from setuptools import setup\nfrom os import path\n\n\ndef read(fn):\n    dir = path.dirname(__file__)\n    with open(path.join(dir, fn)) as fp:\n        return fp.read()\n\n\nsetup(\n    name='tah_common',\n    version=read('VERSION'),\n    author='Till Hoffmann',\n    author_email='tillahoffmann@gmail.com',\n    description='commonly used functionality',\n    long_description=read('README.md'),\n    url='https://github.com/tillahoffmann/tah_common',\n    packages=['tah_common'],\n    requires=[\n        'numpy',\n        'scipy',\n        'matplotlib',\n        'pandas',\n    ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"339743469","text":"#!/usr/bin/env python3\n\nimport pytest\n\nimport os\n\nfrom datetime import datetime\n\n\nfrom metplus.wrappers.ensemble_stat_wrapper import EnsembleStatWrapper\n\nfcst_dir = '/some/path/fcst'\nobs_dir = '/some/path/obs'\nens_mean_dir = '/some/path/ens_mean'\nens_mean_template = 'the_ens_mean_file.nc'\nobs_point_template = 'point_obs.nc'\nfcst_name = 'APCP'\nfcst_level = 'A03'\nobs_name = 'APCP_03'\nobs_level_no_quotes = '(*,*)'\nobs_level = f'\"{obs_level_no_quotes}\"'\nfcst_fmt = f'field = [{{ name=\"{fcst_name}\"; level=\"{fcst_level}\"; }}];'\nobs_fmt = (f'field = [{{ name=\"{obs_name}\"; '\n           f'level=\"{obs_level_no_quotes}\"; }}];')\n\ntime_fmt = '%Y%m%d%H'\nrun_times = ['2005080700', '2005080712']\n\n\ndef set_minimum_config_settings(config, set_fields=True):\n    # set config variables to prevent command from running and bypass check\n    # if input files actually exist\n    config.set('config', 'DO_NOT_RUN_EXE', True)\n    config.set('config', 'INPUT_MUST_EXIST', False)\n\n    # set process and time config variables\n    config.set('config', 'PROCESS_LIST', 'EnsembleStat')\n    config.set('config', 'LOOP_BY', 'INIT')\n    config.set('config', 'INIT_TIME_FMT', time_fmt)\n    config.set('config', 'INIT_BEG', run_times[0])\n    config.set('config', 'INIT_END', run_times[-1])\n    config.set('config', 'INIT_INCREMENT', '12H')\n    config.set('config', 'LEAD_SEQ', '12H')\n    config.set('config', 'LOOP_ORDER', 'times')\n    config.set('config', 'ENSEMBLE_STAT_N_MEMBERS', 1)\n    config.set('config', 'ENSEMBLE_STAT_CONFIG_FILE',\n               '{PARM_BASE}/met_config/EnsembleStatConfig_wrapped')\n    config.set('config', 'FCST_ENSEMBLE_STAT_INPUT_DIR', fcst_dir)\n    
config.set('config', 'OBS_ENSEMBLE_STAT_GRID_INPUT_DIR', obs_dir)\n config.set('config', 'FCST_ENSEMBLE_STAT_INPUT_TEMPLATE',\n '{init?fmt=%Y%m%d%H}/fcst_file_F{lead?fmt=%3H}')\n config.set('config', 'OBS_ENSEMBLE_STAT_GRID_INPUT_TEMPLATE',\n '{valid?fmt=%Y%m%d%H}/obs_file')\n config.set('config', 'ENSEMBLE_STAT_OUTPUT_DIR',\n '{OUTPUT_BASE}/EnsembleStat/output')\n config.set('config', 'ENSEMBLE_STAT_OUTPUT_TEMPLATE', '{valid?fmt=%Y%m%d%H}')\n\n if set_fields:\n config.set('config', 'FCST_VAR1_NAME', fcst_name)\n config.set('config', 'FCST_VAR1_LEVELS', fcst_level)\n config.set('config', 'OBS_VAR1_NAME', obs_name)\n config.set('config', 'OBS_VAR1_LEVELS', obs_level)\n\n\n@pytest.mark.parametrize(\n 'config_overrides, expected_filename', [\n # 0 - set forecast level\n ({'FCST_VAR1_NAME': 'fcst_file',\n 'FCST_VAR1_LEVELS': 'A06',\n 'OBS_VAR1_NAME': 'obs_file',\n 'OBS_VAR1_LEVELS': 'A06',\n 'FCST_ENSEMBLE_STAT_INPUT_TEMPLATE': '{fcst_name}_A{level?fmt=%3H}',\n },\n f'{fcst_dir}/fcst_file_A006'),\n # 1 - don't set forecast level\n ({'FCST_ENSEMBLE_STAT_INPUT_TEMPLATE': 'fcst_file_A{level?fmt=%3H}'},\n f'{fcst_dir}/fcst_file_A000'),\n ]\n)\n@pytest.mark.wrapper_c\ndef test_ensemble_stat_level_in_template(metplus_config, config_overrides,\n expected_filename):\n\n config = metplus_config\n\n set_minimum_config_settings(config, set_fields=False)\n\n # set config variable overrides\n for key, value in config_overrides.items():\n config.set('config', key, value)\n\n wrapper = EnsembleStatWrapper(config)\n assert wrapper.isOK\n\n file_list_dir = wrapper.config.getdir('FILE_LISTS_DIR')\n file_list_file = f\"{file_list_dir}/20050807000000_12_ensemble_stat.txt\"\n if os.path.exists(file_list_file):\n os.remove(file_list_file)\n\n wrapper.run_all_times()\n assert os.path.exists(file_list_file)\n with open(file_list_file, 'r') as file_handle:\n filenames = file_handle.read().splitlines()[1:]\n assert len(filenames) == 1\n assert filenames[0] == expected_filename\n\n\n@pytest.mark.parametrize(\n 'config_overrides, env_var_values', [\n # 0 : no ens, 1 fcst, 1 obs\n ({'FCST_VAR1_NAME': 'fcst_name_1',\n 'FCST_VAR1_LEVELS': 'FCST_LEVEL_1',\n 'OBS_VAR1_NAME': 'obs_name_1',\n 'OBS_VAR1_LEVELS': 'OBS_LEVEL_1',\n },\n {'METPLUS_FCST_FIELD': ('field = ['\n '{ name=\"fcst_name_1\"; level=\"FCST_LEVEL_1\"; }'\n '];'),\n 'METPLUS_OBS_FIELD': ('field = ['\n '{ name=\"obs_name_1\"; level=\"OBS_LEVEL_1\"; }'\n '];'),\n }),\n ]\n)\n@pytest.mark.wrapper_c\ndef test_ensemble_stat_field_info(metplus_config, config_overrides,\n env_var_values):\n\n config = metplus_config\n\n set_minimum_config_settings(config, set_fields=False)\n\n # set config variable overrides\n for key, value in config_overrides.items():\n config.set('config', key, value)\n\n wrapper = EnsembleStatWrapper(config)\n assert wrapper.isOK\n\n all_cmds = wrapper.run_all_times()\n\n assert len(all_cmds) == 2\n\n actual_env_vars = all_cmds[0][1]\n for key, expected_value in env_var_values.items():\n match = next((item for item in actual_env_vars if\n item.startswith(key)), None)\n assert match is not None\n actual_value = match.split('=', 1)[1]\n assert actual_value == expected_value\n print(f\"ACTUAL : {actual_value}\")\n print(f\"EXPECTED: {expected_value}\")\n\n\n@pytest.mark.parametrize(\n 'config_overrides, env_var_values', [\n # 0 no climo settings\n ({}, {}),\n # 1 mean template only\n ({'ENSEMBLE_STAT_CLIMO_MEAN_INPUT_TEMPLATE': 'gs_mean_{init?fmt=%Y%m%d%H}.tmpl'},\n {'CLIMO_MEAN_FILE': '\"gs_mean_YMDH.tmpl\"',\n 'CLIMO_STDEV_FILE': '', }),\n # 2 mean 
template and dir\n ({'ENSEMBLE_STAT_CLIMO_MEAN_INPUT_TEMPLATE': 'gs_mean_{init?fmt=%Y%m%d%H}.tmpl',\n 'ENSEMBLE_STAT_CLIMO_MEAN_INPUT_DIR': '/climo/mean/dir'},\n {'CLIMO_MEAN_FILE': '\"/climo/mean/dir/gs_mean_YMDH.tmpl\"',\n 'CLIMO_STDEV_FILE': '', }),\n # 3 stdev template only\n ({'ENSEMBLE_STAT_CLIMO_STDEV_INPUT_TEMPLATE': 'gs_stdev_{init?fmt=%Y%m%d%H}.tmpl'},\n {'CLIMO_STDEV_FILE': '\"gs_stdev_YMDH.tmpl\"', }),\n # 4 stdev template and dir\n ({'ENSEMBLE_STAT_CLIMO_STDEV_INPUT_TEMPLATE': 'gs_stdev_{init?fmt=%Y%m%d%H}.tmpl',\n 'ENSEMBLE_STAT_CLIMO_STDEV_INPUT_DIR': '/climo/stdev/dir'},\n {'CLIMO_STDEV_FILE': '\"/climo/stdev/dir/gs_stdev_YMDH.tmpl\"', }),\n ]\n)\n@pytest.mark.wrapper_c\ndef test_handle_climo_file_variables(metplus_config, config_overrides,\n env_var_values):\n \"\"\"! Ensure that old and new variables for setting climo_mean and\n climo_stdev are set to the correct values\n \"\"\"\n old_env_vars = ['CLIMO_MEAN_FILE',\n 'CLIMO_STDEV_FILE']\n config = metplus_config\n\n set_minimum_config_settings(config)\n\n # set config variable overrides\n for key, value in config_overrides.items():\n config.set('config', key, value)\n\n wrapper = EnsembleStatWrapper(config)\n assert wrapper.isOK\n\n all_cmds = wrapper.run_all_times()\n assert len(all_cmds) == len(run_times)\n for (_, actual_env_vars), run_time in zip(all_cmds, run_times):\n run_dt = datetime.strptime(run_time, time_fmt)\n ymdh = run_dt.strftime('%Y%m%d%H')\n print(f\"ACTUAL ENV VARS: {actual_env_vars}\")\n for old_env in old_env_vars:\n match = next((item for item in actual_env_vars if\n item.startswith(old_env)), None)\n assert match is not None\n actual_value = match.split('=', 1)[1]\n expected_value = env_var_values.get(old_env, '')\n expected_value = expected_value.replace('YMDH', ymdh)\n assert expected_value == actual_value\n\n\n@pytest.mark.parametrize(\n 'config_overrides, env_var_values', [\n ({'MODEL': 'my_model'},\n {'METPLUS_MODEL': 'model = \"my_model\";'}),\n\n ({'ENSEMBLE_STAT_DESC': 'my_desc'},\n {'METPLUS_DESC': 'desc = \"my_desc\";'}),\n\n ({'DESC': 'my_desc'},\n {'METPLUS_DESC': 'desc = \"my_desc\";'}),\n\n ({'OBTYPE': 'my_obtype'},\n {'METPLUS_OBTYPE': 'obtype = \"my_obtype\";'}),\n\n ({'ENSEMBLE_STAT_REGRID_TO_GRID': 'FCST',\n },\n {'METPLUS_REGRID_DICT': 'regrid = {to_grid = FCST;}',\n 'REGRID_TO_GRID': 'FCST'}),\n\n ({'ENSEMBLE_STAT_REGRID_METHOD': 'NEAREST',\n },\n {'METPLUS_REGRID_DICT': 'regrid = {method = NEAREST;}'}),\n\n ({'ENSEMBLE_STAT_REGRID_WIDTH': '1',\n },\n {'METPLUS_REGRID_DICT': 'regrid = {width = 1;}'}),\n\n ({'ENSEMBLE_STAT_REGRID_VLD_THRESH': '0.5',\n },\n {'METPLUS_REGRID_DICT': 'regrid = {vld_thresh = 0.5;}'}),\n\n ({'ENSEMBLE_STAT_REGRID_SHAPE': 'SQUARE',\n },\n {'METPLUS_REGRID_DICT': 'regrid = {shape = SQUARE;}'}),\n\n ({'ENSEMBLE_STAT_REGRID_CONVERT': '2*x', },\n {'METPLUS_REGRID_DICT': 'regrid = {convert(x) = 2*x;}'}),\n\n ({'ENSEMBLE_STAT_REGRID_CENSOR_THRESH': '>12000,<5000', },\n {'METPLUS_REGRID_DICT': 'regrid = {censor_thresh = [>12000, <5000];}'}),\n\n ({'ENSEMBLE_STAT_REGRID_CENSOR_VAL': '12000,5000', },\n {'METPLUS_REGRID_DICT': 'regrid = {censor_val = [12000, 5000];}'}),\n\n ({'ENSEMBLE_STAT_REGRID_TO_GRID': 'FCST',\n 'ENSEMBLE_STAT_REGRID_METHOD': 'NEAREST',\n 'ENSEMBLE_STAT_REGRID_WIDTH': '1',\n 'ENSEMBLE_STAT_REGRID_VLD_THRESH': '0.5',\n 'ENSEMBLE_STAT_REGRID_SHAPE': 'SQUARE',\n 'ENSEMBLE_STAT_REGRID_CONVERT': '2*x',\n 'ENSEMBLE_STAT_REGRID_CENSOR_THRESH': '>12000,<5000',\n 'ENSEMBLE_STAT_REGRID_CENSOR_VAL': '12000,5000',\n },\n {'METPLUS_REGRID_DICT': ('regrid = 
{to_grid = FCST;method = NEAREST;'\n 'width = 1;vld_thresh = 0.5;shape = SQUARE;'\n 'convert(x) = 2*x;'\n 'censor_thresh = [>12000, <5000];'\n 'censor_val = [12000, 5000];}'\n ),\n 'REGRID_TO_GRID': 'FCST'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_INPUT_TEMPLATE':\n '/some/path/climo/filename.nc',\n },\n {'METPLUS_CLIMO_MEAN_DICT':\n 'climo_mean = {file_name = [\"/some/path/climo/filename.nc\"];}',\n 'CLIMO_MEAN_FILE':\n '\"/some/path/climo/filename.nc\"',\n }),\n ({'ENSEMBLE_STAT_CLIMO_STDEV_INPUT_TEMPLATE':\n '/some/path/climo/stdfile.nc',\n },\n {'METPLUS_CLIMO_STDEV_DICT':\n 'climo_stdev = {file_name = [\"/some/path/climo/stdfile.nc\"];}',\n 'CLIMO_STDEV_FILE':\n '\"/some/path/climo/stdfile.nc\"',\n }),\n # 12 mask grid and poly (old config var)\n ({'ENSEMBLE_STAT_MASK_GRID': 'FULL',\n 'ENSEMBLE_STAT_VERIFICATION_MASK_TEMPLATE': 'one, two',\n },\n {'METPLUS_MASK_GRID':\n 'grid = [\"FULL\"];',\n 'METPLUS_MASK_POLY':\n 'poly = [\"one\", \"two\"];',\n }),\n # 13 mask grid and poly (new config var)\n ({'ENSEMBLE_STAT_MASK_GRID': 'FULL',\n 'ENSEMBLE_STAT_MASK_POLY': 'one, two',\n },\n {'METPLUS_MASK_GRID':\n 'grid = [\"FULL\"];',\n 'METPLUS_MASK_POLY':\n 'poly = [\"one\", \"two\"];',\n }),\n # 14 mask grid value\n ({'ENSEMBLE_STAT_MASK_GRID': 'FULL',\n },\n {'METPLUS_MASK_GRID':\n 'grid = [\"FULL\"];',\n }),\n # 15 mask grid empty string (should create empty list)\n ({'ENSEMBLE_STAT_MASK_GRID': '',\n },\n {'METPLUS_MASK_GRID':\n 'grid = [];',\n }),\n # 16 mask poly (old config var)\n ({'ENSEMBLE_STAT_VERIFICATION_MASK_TEMPLATE': 'one, two',\n },\n {'METPLUS_MASK_POLY':\n 'poly = [\"one\", \"two\"];',\n }),\n # 27 mask poly (new config var)\n ({'ENSEMBLE_STAT_MASK_POLY': 'one, two',\n },\n {'METPLUS_MASK_POLY':\n 'poly = [\"one\", \"two\"];',\n }),\n # output_prefix\n ({'ENSEMBLE_STAT_OUTPUT_PREFIX': 'my_output_prefix'},\n {'METPLUS_OUTPUT_PREFIX': 'output_prefix = \"my_output_prefix\";'}),\n # output_flag individual and all at once\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_ECNT': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {ecnt = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_RPS': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {rps = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_RHIST': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {rhist = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_PHIST': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {phist = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_ORANK': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {orank = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_SSVAR': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {ssvar = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_RELP': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {relp = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_PCT': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {pct = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_PSTD': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {pstd = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_PJC': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {pjc = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_PRC': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {prc = STAT;}'}),\n\n ({'ENSEMBLE_STAT_OUTPUT_FLAG_ECLV': 'STAT', },\n {'METPLUS_OUTPUT_FLAG_DICT': 'output_flag = {eclv = STAT;}'}),\n\n ({\n 'ENSEMBLE_STAT_OUTPUT_FLAG_ECNT': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_RPS': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_RHIST': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_PHIST': 'STAT',\n 
'ENSEMBLE_STAT_OUTPUT_FLAG_ORANK': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_SSVAR': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_RELP': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_PCT': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_PSTD': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_PJC': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_PRC': 'STAT',\n 'ENSEMBLE_STAT_OUTPUT_FLAG_ECLV': 'STAT',\n },\n {\n 'METPLUS_OUTPUT_FLAG_DICT': ('output_flag = {ecnt = STAT;'\n 'rps = STAT;rhist = STAT;'\n 'phist = STAT;orank = STAT;'\n 'ssvar = STAT;relp = STAT;'\n 'pct = STAT;pstd = STAT;'\n 'pjc = STAT;prc = STAT;eclv = STAT;'\n '}')}),\n # nc_orank_flag\n ({'ENSEMBLE_STAT_NC_ORANK_FLAG_LATLON': 'True', },\n {'METPLUS_NC_ORANK_FLAG_DICT': 'nc_orank_flag = {latlon = TRUE;}'}),\n\n ({'ENSEMBLE_STAT_NC_ORANK_FLAG_MEAN': 'True', },\n {'METPLUS_NC_ORANK_FLAG_DICT': 'nc_orank_flag = {mean = TRUE;}'}),\n\n ({'ENSEMBLE_STAT_NC_ORANK_FLAG_RAW': 'True', },\n {'METPLUS_NC_ORANK_FLAG_DICT': 'nc_orank_flag = {raw = TRUE;}'}),\n\n ({'ENSEMBLE_STAT_NC_ORANK_FLAG_RANK': 'True', },\n {'METPLUS_NC_ORANK_FLAG_DICT': 'nc_orank_flag = {rank = TRUE;}'}),\n\n ({'ENSEMBLE_STAT_NC_ORANK_FLAG_PIT': 'True', },\n {'METPLUS_NC_ORANK_FLAG_DICT': 'nc_orank_flag = {pit = TRUE;}'}),\n\n ({'ENSEMBLE_STAT_NC_ORANK_FLAG_VLD_COUNT': 'True', },\n {\n 'METPLUS_NC_ORANK_FLAG_DICT': 'nc_orank_flag = {vld_count = TRUE;}'}),\n\n ({'ENSEMBLE_STAT_NC_ORANK_FLAG_WEIGHT': 'True', },\n {'METPLUS_NC_ORANK_FLAG_DICT': 'nc_orank_flag = {weight = TRUE;}'}),\n\n ({\n 'ENSEMBLE_STAT_NC_ORANK_FLAG_LATLON': 'True',\n 'ENSEMBLE_STAT_NC_ORANK_FLAG_MEAN': 'True',\n 'ENSEMBLE_STAT_NC_ORANK_FLAG_RAW': 'True',\n 'ENSEMBLE_STAT_NC_ORANK_FLAG_RANK': 'True',\n 'ENSEMBLE_STAT_NC_ORANK_FLAG_PIT': 'True',\n 'ENSEMBLE_STAT_NC_ORANK_FLAG_VLD_COUNT': 'True',\n 'ENSEMBLE_STAT_NC_ORANK_FLAG_WEIGHT': 'True',\n },\n {\n 'METPLUS_NC_ORANK_FLAG_DICT': ('nc_orank_flag = {latlon = TRUE;'\n 'mean = TRUE;raw = TRUE;'\n 'rank = TRUE;pit = TRUE;'\n 'vld_count = TRUE;'\n 'weight = TRUE;}')\n }),\n\n # climo_cdf dictionary\n ({'ENSEMBLE_STAT_CLIMO_CDF_CDF_BINS': '1', },\n {'METPLUS_CLIMO_CDF_DICT': 'climo_cdf = {cdf_bins = 1.0;}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_CDF_CENTER_BINS': 'True', },\n {'METPLUS_CLIMO_CDF_DICT': 'climo_cdf = {center_bins = TRUE;}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_CDF_WRITE_BINS': 'False', },\n {'METPLUS_CLIMO_CDF_DICT': 'climo_cdf = {write_bins = FALSE;}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_CDF_DIRECT_PROB': 'False', },\n {'METPLUS_CLIMO_CDF_DICT': 'climo_cdf = {direct_prob = FALSE;}'}),\n\n ({\n 'ENSEMBLE_STAT_CLIMO_CDF_CDF_BINS': '1',\n 'ENSEMBLE_STAT_CLIMO_CDF_CENTER_BINS': 'True',\n 'ENSEMBLE_STAT_CLIMO_CDF_WRITE_BINS': 'False',\n 'ENSEMBLE_STAT_CLIMO_CDF_DIRECT_PROB': 'False',\n },\n {\n 'METPLUS_CLIMO_CDF_DICT': 'climo_cdf = {cdf_bins = 1.0;center_bins = TRUE;write_bins = FALSE;direct_prob = FALSE;}'}),\n\n ({'ENSEMBLE_STAT_INTERP_VLD_THRESH': '0.8', },\n {'METPLUS_INTERP_DICT': 'interp = {vld_thresh = 0.8;}'}),\n\n ({'ENSEMBLE_STAT_INTERP_SHAPE': 'CIRCLE', },\n {'METPLUS_INTERP_DICT': 'interp = {shape = CIRCLE;}'}),\n\n ({'ENSEMBLE_STAT_INTERP_TYPE_METHOD': 'BILIN', },\n {'METPLUS_INTERP_DICT': 'interp = {type = {method = [BILIN];}}'}),\n\n ({'ENSEMBLE_STAT_INTERP_TYPE_WIDTH': '2', },\n {'METPLUS_INTERP_DICT': 'interp = {type = {width = [2];}}'}),\n # multiple interp type methods\n ({'ENSEMBLE_STAT_INTERP_TYPE_METHOD': 'BILIN, NEAREST', },\n {'METPLUS_INTERP_DICT': 'interp = {type = {method = [BILIN, NEAREST];}}'}),\n # multiple interp type methods\n ({'ENSEMBLE_STAT_INTERP_TYPE_WIDTH': '2,3', },\n 
{'METPLUS_INTERP_DICT': 'interp = {type = {width = [2, 3];}}'}),\n\n ({\n 'ENSEMBLE_STAT_INTERP_VLD_THRESH': '0.8',\n 'ENSEMBLE_STAT_INTERP_SHAPE': 'CIRCLE',\n 'ENSEMBLE_STAT_INTERP_TYPE_METHOD': 'BILIN',\n 'ENSEMBLE_STAT_INTERP_TYPE_WIDTH': '2',\n },\n {'METPLUS_INTERP_DICT': ('interp = {vld_thresh = 0.8;'\n 'shape = CIRCLE;'\n 'type = {method = [BILIN];width = [2];}}')}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_FILE_NAME': '/some/climo_mean/file.txt', },\n {'METPLUS_CLIMO_MEAN_DICT': ('climo_mean = {file_name = '\n '[\"/some/climo_mean/file.txt\"];}'),\n 'CLIMO_MEAN_FILE': '\"/some/climo_mean/file.txt\"'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_FIELD': '{name=\"CLM_NAME\"; level=\"(0,0,*,*)\";}', },\n {'METPLUS_CLIMO_MEAN_DICT': 'climo_mean = {field = [{name=\"CLM_NAME\"; level=\"(0,0,*,*)\";}];}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_REGRID_METHOD': 'NEAREST', },\n {'METPLUS_CLIMO_MEAN_DICT': 'climo_mean = {regrid = {method = NEAREST;}}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_REGRID_WIDTH': '1', },\n {'METPLUS_CLIMO_MEAN_DICT': 'climo_mean = {regrid = {width = 1;}}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_REGRID_VLD_THRESH': '0.5', },\n {\n 'METPLUS_CLIMO_MEAN_DICT': 'climo_mean = {regrid = {vld_thresh = 0.5;}}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_REGRID_SHAPE': 'SQUARE', },\n {'METPLUS_CLIMO_MEAN_DICT': 'climo_mean = {regrid = {shape = SQUARE;}}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_TIME_INTERP_METHOD': 'NEAREST', },\n {\n 'METPLUS_CLIMO_MEAN_DICT': 'climo_mean = {time_interp_method = NEAREST;}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_MATCH_MONTH': 'True', },\n {'METPLUS_CLIMO_MEAN_DICT': 'climo_mean = {match_month = TRUE;}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_DAY_INTERVAL': '30', },\n {'METPLUS_CLIMO_MEAN_DICT': 'climo_mean = {day_interval = 30;}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_MEAN_HOUR_INTERVAL': '12', },\n {'METPLUS_CLIMO_MEAN_DICT': 'climo_mean = {hour_interval = 12;}'}),\n\n ({\n 'ENSEMBLE_STAT_CLIMO_MEAN_FILE_NAME': '/some/climo_mean/file.txt',\n 'ENSEMBLE_STAT_CLIMO_MEAN_FIELD': '{name=\"CLM_NAME\"; level=\"(0,0,*,*)\";}',\n 'ENSEMBLE_STAT_CLIMO_MEAN_REGRID_METHOD': 'NEAREST',\n 'ENSEMBLE_STAT_CLIMO_MEAN_REGRID_WIDTH': '1',\n 'ENSEMBLE_STAT_CLIMO_MEAN_REGRID_VLD_THRESH': '0.5',\n 'ENSEMBLE_STAT_CLIMO_MEAN_REGRID_SHAPE': 'SQUARE',\n 'ENSEMBLE_STAT_CLIMO_MEAN_TIME_INTERP_METHOD': 'NEAREST',\n 'ENSEMBLE_STAT_CLIMO_MEAN_MATCH_MONTH': 'True',\n 'ENSEMBLE_STAT_CLIMO_MEAN_DAY_INTERVAL': '30',\n 'ENSEMBLE_STAT_CLIMO_MEAN_HOUR_INTERVAL': '12',\n },\n {'METPLUS_CLIMO_MEAN_DICT': ('climo_mean = {file_name = '\n '[\"/some/climo_mean/file.txt\"];'\n 'field = [{name=\"CLM_NAME\"; level=\"(0,0,*,*)\";}];'\n 'regrid = {method = NEAREST;width = 1;'\n 'vld_thresh = 0.5;shape = SQUARE;}'\n 'time_interp_method = NEAREST;'\n 'match_month = TRUE;day_interval = 30;'\n 'hour_interval = 12;}'),\n 'CLIMO_MEAN_FILE': '\"/some/climo_mean/file.txt\"'}),\n\n # climo stdev\n ({'ENSEMBLE_STAT_CLIMO_STDEV_FILE_NAME': '/some/climo_stdev/file.txt', },\n {'METPLUS_CLIMO_STDEV_DICT': ('climo_stdev = {file_name = '\n '[\"/some/climo_stdev/file.txt\"];}'),\n 'CLIMO_STDEV_FILE': '\"/some/climo_stdev/file.txt\"'}),\n\n ({'ENSEMBLE_STAT_CLIMO_STDEV_FIELD': '{name=\"CLM_NAME\"; level=\"(0,0,*,*)\";}', },\n {'METPLUS_CLIMO_STDEV_DICT': 'climo_stdev = {field = [{name=\"CLM_NAME\"; level=\"(0,0,*,*)\";}];}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_STDEV_REGRID_METHOD': 'NEAREST', },\n {\n 'METPLUS_CLIMO_STDEV_DICT': 'climo_stdev = {regrid = {method = NEAREST;}}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_STDEV_REGRID_WIDTH': '1', },\n {'METPLUS_CLIMO_STDEV_DICT': 'climo_stdev = 
{regrid = {width = 1;}}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_STDEV_REGRID_VLD_THRESH': '0.5', },\n {\n 'METPLUS_CLIMO_STDEV_DICT': 'climo_stdev = {regrid = {vld_thresh = 0.5;}}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_STDEV_REGRID_SHAPE': 'SQUARE', },\n {\n 'METPLUS_CLIMO_STDEV_DICT': 'climo_stdev = {regrid = {shape = SQUARE;}}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_STDEV_TIME_INTERP_METHOD': 'NEAREST', },\n {\n 'METPLUS_CLIMO_STDEV_DICT': 'climo_stdev = {time_interp_method = NEAREST;}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_STDEV_MATCH_MONTH': 'True', },\n {'METPLUS_CLIMO_STDEV_DICT': 'climo_stdev = {match_month = TRUE;}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_STDEV_DAY_INTERVAL': '30', },\n {'METPLUS_CLIMO_STDEV_DICT': 'climo_stdev = {day_interval = 30;}'}),\n\n ({'ENSEMBLE_STAT_CLIMO_STDEV_HOUR_INTERVAL': '12', },\n {'METPLUS_CLIMO_STDEV_DICT': 'climo_stdev = {hour_interval = 12;}'}),\n\n ({\n 'ENSEMBLE_STAT_CLIMO_STDEV_FILE_NAME': '/some/climo_stdev/file.txt',\n 'ENSEMBLE_STAT_CLIMO_STDEV_FIELD': '{name=\"CLM_NAME\"; level=\"(0,0,*,*)\";}',\n 'ENSEMBLE_STAT_CLIMO_STDEV_REGRID_METHOD': 'NEAREST',\n 'ENSEMBLE_STAT_CLIMO_STDEV_REGRID_WIDTH': '1',\n 'ENSEMBLE_STAT_CLIMO_STDEV_REGRID_VLD_THRESH': '0.5',\n 'ENSEMBLE_STAT_CLIMO_STDEV_REGRID_SHAPE': 'SQUARE',\n 'ENSEMBLE_STAT_CLIMO_STDEV_TIME_INTERP_METHOD': 'NEAREST',\n 'ENSEMBLE_STAT_CLIMO_STDEV_MATCH_MONTH': 'True',\n 'ENSEMBLE_STAT_CLIMO_STDEV_DAY_INTERVAL': '30',\n 'ENSEMBLE_STAT_CLIMO_STDEV_HOUR_INTERVAL': '12',\n },\n {'METPLUS_CLIMO_STDEV_DICT': ('climo_stdev = {file_name = '\n '[\"/some/climo_stdev/file.txt\"];'\n 'field = [{name=\"CLM_NAME\"; level=\"(0,0,*,*)\";}];'\n 'regrid = {method = NEAREST;width = 1;'\n 'vld_thresh = 0.5;shape = SQUARE;}'\n 'time_interp_method = NEAREST;'\n 'match_month = TRUE;day_interval = 30;'\n 'hour_interval = 12;}'),\n 'CLIMO_STDEV_FILE': '\"/some/climo_stdev/file.txt\"'}),\n ({'ENSEMBLE_STAT_OBS_QUALITY_INC': '2,3,4', },\n {'METPLUS_OBS_QUALITY_INC': 'obs_quality_inc = [\"2\", \"3\", \"4\"];'}),\n ({'ENSEMBLE_STAT_OBS_QUALITY_EXC': '5,6,7', },\n {'METPLUS_OBS_QUALITY_EXC': 'obs_quality_exc = [\"5\", \"6\", \"7\"];'}),\n\n ({'ENSEMBLE_STAT_ENS_MEMBER_IDS': '1,2,3,4', },\n {'METPLUS_ENS_MEMBER_IDS': 'ens_member_ids = [\"1\", \"2\", \"3\", \"4\"];'}),\n\n ({'ENSEMBLE_STAT_CONTROL_ID': '0', },\n {'METPLUS_CONTROL_ID': 'control_id = \"0\";'}),\n\n ({'ENSEMBLE_STAT_GRID_WEIGHT_FLAG': 'COS_LAT', },\n {'METPLUS_GRID_WEIGHT_FLAG': 'grid_weight_flag = COS_LAT;'}),\n\n ({'ENSEMBLE_STAT_PROB_CAT_THRESH': '<=0.25', },\n {'METPLUS_PROB_CAT_THRESH': 'prob_cat_thresh = [<=0.25];'}),\n\n ({'ENSEMBLE_STAT_PROB_PCT_THRESH': '==0.25', },\n {'METPLUS_PROB_PCT_THRESH': 'prob_pct_thresh = [==0.25];'}),\n\n ({'ENSEMBLE_STAT_ECLV_POINTS': '0.05', },\n {'METPLUS_ECLV_POINTS': 'eclv_points = 0.05;'}),\n\n ({'ENSEMBLE_STAT_ENS_THRESH': '0.1', },\n {'METPLUS_ENS_THRESH': 'ens_thresh = 0.1;'}),\n\n ({'ENSEMBLE_STAT_VLD_THRESH': '0.5', },\n {'METPLUS_VLD_THRESH': 'vld_thresh = 0.5;'}),\n\n ({'ENSEMBLE_STAT_OBS_THRESH': 'NA, 0.5', },\n {'METPLUS_OBS_THRESH': 'obs_thresh = [NA, 0.5];'}),\n\n ({'ENSEMBLE_STAT_ENS_MEAN_INPUT_DIR': ens_mean_dir,\n 'ENSEMBLE_STAT_ENS_MEAN_INPUT_TEMPLATE': ens_mean_template},\n {}),\n\n ({'OBS_ENSEMBLE_STAT_POINT_INPUT_DIR': obs_dir,\n 'OBS_ENSEMBLE_STAT_POINT_INPUT_TEMPLATE': obs_point_template},\n {}),\n\n ({'ENSEMBLE_STAT_ENS_MEAN_INPUT_DIR': ens_mean_dir,\n 'ENSEMBLE_STAT_ENS_MEAN_INPUT_TEMPLATE': ens_mean_template,\n 'OBS_ENSEMBLE_STAT_POINT_INPUT_DIR': obs_dir,\n 'OBS_ENSEMBLE_STAT_POINT_INPUT_TEMPLATE': obs_point_template},\n {}),\n\n 
]\n)\n@pytest.mark.wrapper_c\ndef test_ensemble_stat_single_field(metplus_config, config_overrides,\n env_var_values):\n\n config = metplus_config\n\n set_minimum_config_settings(config)\n\n # set config variable overrides\n for key, value in config_overrides.items():\n config.set('config', key, value)\n\n wrapper = EnsembleStatWrapper(config)\n assert wrapper.isOK\n\n app_path = os.path.join(config.getdir('MET_BIN_DIR'), wrapper.app_name)\n verbosity = f\"-v {wrapper.c_dict['VERBOSITY']}\"\n file_list_dir = wrapper.config.getdir('FILE_LISTS_DIR')\n config_file = wrapper.c_dict.get('CONFIG_FILE')\n out_dir = wrapper.c_dict.get('OUTPUT_DIR')\n\n point_obs = ' '\n ens_mean = ' '\n if 'OBS_ENSEMBLE_STAT_POINT_INPUT_TEMPLATE' in config_overrides:\n point_obs = f' -point_obs \"{obs_dir}/{obs_point_template}\" '\n if 'ENSEMBLE_STAT_ENS_MEAN_INPUT_TEMPLATE' in config_overrides:\n ens_mean = f' -ens_mean {ens_mean_dir}/{ens_mean_template} '\n\n expected_cmds = [(f\"{app_path} {verbosity} \"\n f\"{file_list_dir}/20050807000000_12_ensemble_stat.txt \"\n f\"{config_file}{point_obs}\"\n f'-grid_obs \"{obs_dir}/2005080712/obs_file\"{ens_mean}'\n f\"-outdir {out_dir}/2005080712\"),\n (f\"{app_path} {verbosity} \"\n f\"{file_list_dir}/20050807120000_12_ensemble_stat.txt \"\n f\"{config_file}{point_obs}\"\n f'-grid_obs \"{obs_dir}/2005080800/obs_file\"{ens_mean}'\n f\"-outdir {out_dir}/2005080800\"),\n ]\n\n all_cmds = wrapper.run_all_times()\n print(f\"ALL COMMANDS: {all_cmds}\")\n assert len(all_cmds) == len(expected_cmds)\n\n missing_env = [item for item in env_var_values\n if item not in wrapper.WRAPPER_ENV_VAR_KEYS]\n env_var_keys = wrapper.WRAPPER_ENV_VAR_KEYS + missing_env\n\n for (cmd, env_vars), expected_cmd in zip(all_cmds, expected_cmds):\n # ensure commands are generated as expected\n assert cmd == expected_cmd\n\n # check that environment variables were set properly\n # including deprecated env vars (not in wrapper env var keys)\n for env_var_key in env_var_keys:\n match = next((item for item in env_vars if\n item.startswith(env_var_key)), None)\n assert(match is not None)\n actual_value = match.split('=', 1)[1]\n if env_var_key == 'METPLUS_FCST_FIELD':\n assert actual_value == fcst_fmt\n elif env_var_key == 'METPLUS_OBS_FIELD':\n assert actual_value == obs_fmt\n else:\n assert env_var_values.get(env_var_key, '') == actual_value\n\n\n@pytest.mark.wrapper_c\ndef test_get_config_file(metplus_config):\n fake_config_name = '/my/config/file'\n\n config = metplus_config\n default_config_file = os.path.join(config.getdir('PARM_BASE'),\n 'met_config',\n 'EnsembleStatConfig_wrapped')\n\n wrapper = EnsembleStatWrapper(config)\n assert wrapper.c_dict['CONFIG_FILE'] == default_config_file\n\n config.set('config', 'ENSEMBLE_STAT_CONFIG_FILE', fake_config_name)\n wrapper = EnsembleStatWrapper(config)\n assert wrapper.c_dict['CONFIG_FILE'] == fake_config_name\n\n\n@pytest.mark.parametrize(\n 'config_overrides, expected_num_files', [\n ({}, 4),\n ({'ENSEMBLE_STAT_ENS_MEMBER_IDS': '1'}, 1),\n ]\n)\n@pytest.mark.wrapper_c\ndef test_ensemble_stat_fill_missing(metplus_config, config_overrides,\n expected_num_files):\n config = metplus_config\n\n set_minimum_config_settings(config)\n\n # change some config values for this test\n config.set('config', 'INIT_END', run_times[0])\n config.set('config', 'ENSEMBLE_STAT_N_MEMBERS', 4)\n\n # set config variable overrides\n for key, value in config_overrides.items():\n config.set('config', key, value)\n\n wrapper = EnsembleStatWrapper(config)\n\n file_list_file = 
os.path.join(wrapper.config.getdir('FILE_LISTS_DIR'),\n '20050807000000_12_ensemble_stat.txt')\n if os.path.exists(file_list_file):\n os.remove(file_list_file)\n\n all_cmds = wrapper.run_all_times()\n assert len(all_cmds) == 1\n\n with open(file_list_file, 'r') as file_handle:\n actual_num_files = len(file_handle.read().splitlines()) - 1\n\n assert actual_num_files == expected_num_files\n","sub_path":"internal/tests/pytests/wrappers/ensemble_stat/test_ensemble_stat_wrapper.py","file_name":"test_ensemble_stat_wrapper.py","file_ext":"py","file_size_in_byte":31788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"37237050","text":"from tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport tweepy\nfrom nltk.corpus import stopwords\nimport sys\nimport re\nfrom nltk import NaiveBayesClassifier\nimport cPickle as pickle\nfrom nltk.corpus import stopwords\nimport os\nimport re\nimport pkg_resources\nimport itertools\nfrom sklearn.externals import joblib\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n\n\nresource_package = __name__\n\nresource_path = '/'.join(('', 'model/svm.pkl'))\nsvm_path = pkg_resources.resource_filename(resource_package, resource_path)\nmodel_svm = joblib.load(svm_path)\n\nresource_path = '/'.join(('', 'model/mlp.pkl'))\nmlp_path = pkg_resources.resource_filename(resource_package, resource_path)\nmodel_mlp = joblib.load(mlp_path)\n\nresource_path = '/'.join(('', 'model/count_vect.pkl'))\nvec_path = pkg_resources.resource_filename(resource_package, resource_path)\nvec = joblib.load(vec_path)\n\nresource_path = '/'.join(('', 'model/tfidf_transformer.pkl'))\nidf_path = pkg_resources.resource_filename(resource_package, resource_path)\nidf = joblib.load(idf_path)\n\ndef clasification_SVM_RBF(dataframe):\n tfidf_transformer = TfidfTransformer()\n count_vect = CountVectorizer()\n\n data_samples = dataframe.text\n\n X_new_counts = vec.transform(data_samples)\n X_new_tfidf = idf.transform(X_new_counts)\n\n dataframe['predict'] = pd.DataFrame({'predict': model_svm.predict(X_new_tfidf)})\n y_test = dataframe.emotion.astype(np.int64)\n predict = dataframe.predict\n \n dataframe['state'] = np.where(y_test == predict, 'matched', 'unmatched')\n accuracy = accuracy_score(y_test, predict)\n accuracy = 100 * accuracy\n #conf = confusion_matrix(y_test, predict)\n y_true = y_test\n unique_label = np.unique(y_true)\n conf = (pd.DataFrame(confusion_matrix(y_true, predict, labels=unique_label), \n index=['true:{:}'.format(x) for x in unique_label], \n columns=['pred:{:}'.format(x) for x in unique_label]))\n report = classification_report(y_test, predict)\n\n #showCMPlot()\n \n return dataframe, convertToDict(dataframe) , accuracy , conf, report \n\ndef clasification_MLP(dataframe):\n tfidf_transformer = TfidfTransformer()\n count_vect = CountVectorizer()\n\n data_samples = dataframe.text\n\n X_new_counts = vec.transform(data_samples)\n X_new_tfidf = 
idf.transform(X_new_counts)\n\n dataframe['predict'] = pd.DataFrame({'predict': model_mlp.predict(X_new_tfidf)})\n y_test = dataframe.emotion.astype(np.int64)\n predict = dataframe.predict\n \n dataframe['state'] = np.where(y_test == predict, 'matched', 'unmatched')\n accuracy_MLP = accuracy_score(y_test, predict)\n accuracy_MLP = 100 * accuracy_MLP\n #conf = confusion_matrix(y_test, predict)\n y_true = y_test\n unique_label = np.unique(y_true)\n conf = (pd.DataFrame(confusion_matrix(y_true, predict, labels=unique_label), \n index=['true:{:}'.format(x) for x in unique_label], \n columns=['pred:{:}'.format(x) for x in unique_label]))\n report = classification_report(y_test, predict)\n\n return dataframe, convertToDict(dataframe), accuracy_MLP, conf, report \n\n\ndef convertToDict(tweet):\n tweets = [] \n for i in range(len(tweet)):\n obj = {}\n #print \"test 2 \", tweet.loc[i]['text']\n obj['text'] = tweet.loc[i]['text']\n obj['emotion'] = tweet.loc[i]['emotion']\n obj['predict'] = tweet.loc[i]['predict']\n obj['state'] = tweet.loc[i]['state']\n #print tweet.iloc[i]['text']\n tweets.append(obj)\n\n return tweets\n\n# def GraphsViewBar(request):\n# f = plt.figure()\n# x = np.arange(10)\n# h = [0,1,2,3,5,6,4,2,1,0]\n# plt.title('Title')\n# plt.xlim(0, 10)\n# plt.ylim(0, 8)\n# plt.xlabel('x label')\n# plt.ylabel('y label')\n# bar1 = plt.bar(x,h,width=1.0,bottom=0,color='Green',alpha=0.65,label='Legend')\n# plt.legend()\n# #show = plt.show()\n# canvas = FigureCanvasAgg(f) \n# response = HttpResponse(content_type='image/png')\n# canvas.print_png(response)\n# matplotlib.pyplot.close(f) \n# return response\n\ndef showCMPlot(request):\n f = plt.figure()\n x = np.arange(10)\n h = [0,1,2,3,5,6,4,2,1,0]\n plt.title('Title')\n plt.xlim(0, 10)\n plt.ylim(0, 8)\n plt.xlabel('x label')\n plt.ylabel('y label')\n bar1 = plt.bar(x,h,width=1.0,bottom=0,color='Green',alpha=0.65,label='Legend')\n plt.legend()\n\n canvas = FigureCanvasAgg(f) \n response = HttpResponse(content_type='image/png')\n canvas.print_png(response)\n matplotlib.pyplot.close(f)\n\n return response\n\ndef analyzeInput(text):\n \n tweet = []\n URL = ['']\n tfidf_transformer = TfidfTransformer()\n count_vect = CountVectorizer()\n\n tweet = [text]\n\n X_new_counts = vec.transform(tweet)\n X_new_tfidf = idf.transform(X_new_counts)\n\n predict = model_svm.predict(X_new_tfidf)\n\n #accuracy = accuracy_score(y_test, predict)\n #accuracy = 100 * accuracy\n\n #print accuracy\n\n if predict == 0:\n predict = \" JOY\"\n URL = '/static/images/0.png'\n elif predict == 1:\n predict = \" FEAR\"\n URL = '/static/images/1.png'\n elif predict == 2:\n predict = \" ANGER\"\n URL = '/static/images/2.png'\n elif predict == 3:\n predict = \" SADNESS\"\n URL = '/static/images/3.png'\n elif predict == 4:\n predict = \" DISGUST\"\n URL = '/static/images/4.png'\n elif predict == 5:\n predict = \" SURPRISE\"\n URL = '/static/images/5.png'\n\n return predict, URL\n\ndef evaluasiPerKelas(tweet):\n\n\n totJoy = 0.0\n totFear = 0.0\n totAnger = 0.0\n totSadness = 0.0\n totDisgust = 0.0\n totSurprise = 0.0\n\n listTweet = tweet['predict'].astype(np.int64)\n\n #print listTweet\n\n for pair in listTweet:\n if (pair == 0):\n totJoy += 1\n elif (pair == 1):\n totFear += 1\n elif (pair == 2):\n totAnger += 1\n elif (pair == 3):\n totSadness += 1\n elif (pair == 4):\n totDisgust += 1\n elif (pair == 5):\n totSurprise += 1\n\n total = totJoy + totFear + totAnger + totSadness + totDisgust + totSurprise\n\n if(total > 0):\n return 
[round(100*(totJoy/total),6),round(100*(totFear/total),6),round(100*(totAnger/total),6),round(100*(totSadness/total),6),round(100*(totDisgust/total),6),round(100*(totSurprise/total),6)] , [int(totJoy) , int(totFear) , int(totAnger) , int(totSadness) , int(totDisgust) , int(totSurprise)] , int(total)\n else:\n return [\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\"]\n\ndef evaluasiPerKelasMatchUnmatch(tweet):\n\n total = 0\n\n MJoy = 0\n MFear = 0\n MAnger = 0\n MSadness = 0\n MDisgust = 0\n MSurprise = 0\n\n UJoy = 0\n UFear = 0\n UAnger = 0\n USadness = 0\n UDisgust = 0\n USurprise = 0\n\n predict = tweet['predict'].astype(np.int64) \n emotion = tweet['emotion'].astype(np.int64)\n state = tweet['state'].astype(str)\n\n for i in range(len(tweet)):\n if predict.iloc[i] == 0 and state.iloc[i] == \"matched\" :\n MJoy += 1\n elif predict.iloc[i] == 1 and state.iloc[i] == \"matched\" :\n MFear += 1\n elif predict.iloc[i] == 2 and state.iloc[i] == \"matched\" :\n MAnger += 1\n elif predict.iloc[i] == 3 and state.iloc[i] == \"matched\" :\n MSadness += 1\n elif predict.iloc[i] == 4 and state.iloc[i] == \"matched\" :\n MDisgust += 1\n elif predict.iloc[i] == 5 and state.iloc[i] == \"matched\" :\n MSurprise += 1\n\n for i in range(len(tweet)):\n if predict.iloc[i] == 0 and state.iloc[i] == \"unmatched\" :\n UJoy += 1\n elif predict.iloc[i] == 1 and state.iloc[i] == \"unmatched\" :\n UFear += 1\n elif predict.iloc[i] == 2 and state.iloc[i] == \"unmatched\" :\n UAnger += 1\n elif predict.iloc[i] == 3 and state.iloc[i] == \"unmatched\" :\n USadness += 1\n elif predict.iloc[i] == 4 and state.iloc[i] == \"unmatched\" :\n UDisgust += 1\n elif predict.iloc[i] == 5 and state.iloc[i] == \"unmatched\" :\n USurprise += 1\n\n total = MJoy+MFear+MAnger+MSadness+MDisgust+MSurprise + UJoy+UFear+UAnger+USadness+UDisgust+USurprise\n\n return [MJoy,MFear,MAnger,MSadness,MDisgust,MSurprise] , [UJoy,UFear,UAnger,USadness,UDisgust,USurprise] , total\n","sub_path":"Algo/Classify.py","file_name":"Classify.py","file_ext":"py","file_size_in_byte":8931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"584541399","text":"#!/bin/env python\n\nfrom common import *\nfrom fea_kernel import *\nfrom fea_core import *\nfrom data_loader import Cc\n\n\ndef getStatus(rate):\n if rate <= 0.901:\n return 2\n elif rate >= 1.099:\n return 1\n return 0\n\n\ndef getWavStatus(h_rate, l_rate):\n if h_rate >= 1.099 and l_rate <= 0.901:\n return 3\n elif l_rate <= 0.901:\n return 2\n elif h_rate >= 1.099:\n return 1\n return 0\n\n\ndef genEx(v):\n ex = []\n ex += [np.asarray(v[0], dtype=np.float64)]\n\n cc = Cc(*v[:11])\n # for i in [5, 10, 15]:\n base_ex = []\n for i in [5, 10, 20, 60]:\n # emv_value, emv_ma = emv(cc, i)\n\n # cr_value = cr(cc, i)\n # br_value = br(cc, i)\n\n sma_value = sma(cc.e, i)\n ema_value = ema(cc.e, sma_value, i)\n sma_value = fea_length_extend(sma_value, len(cc.ds))\n ema_value = fea_length_extend(ema_value, len(cc.ds))\n bias_value = bias(cc, i)\n v_value = vvv(cc, i)\n # boll_rate, boll_std = boll(cc, i)\n\n # rsi_value = rsi(cc, i)\n\n # cci_value = cci(cc, i)\n\n # osc_value = osc(cc, i)\n # psy_value = psy(cc, i)\n # wms_value = wms(cc, i)\n # obv_value = obv(cc, i)\n base_ex += [\n sma_value,\n # ema_value, bias_value, v_value,\n # emv_value, emv_ma, cr_value, br_value, \n # boll_rate, boll_std, \n # rsi_value, cci_value, osc_value, psy_value, wms_value,\n # obv_value\n ]\n # base_ex += [base_ex[0] / base_ex[4]]\n # base_ex[-1][np.isinf(base_ex[-1])] = 0\n # 
base_ex += [base_ex[1] / base_ex[5]]\n        # base_ex[-1][np.isinf(base_ex[-1])] = 0\n        # base_ex += [base_ex[2] / base_ex[6]]\n        # base_ex[-1][np.isinf(base_ex[-1])] = 0\n        # base_ex += [base_ex[3] / base_ex[7]]\n        # base_ex[-1][np.isinf(base_ex[-1])] = 0\n\n        ex += base_ex\n    # # for (a, b, c) in [(4, 2, 2), (9, 3, 3), (16, 4, 4)]:\n    for (a, b, c) in [(9, 3, 3)]:\n        k, d, j = kdj(cc, a, b, c)\n        ex += [k, d, j]\n\n    # # for (l, s, m) in [(5, 3, 2), (10, 5, 3), (15, 7, 5)]:\n    # for (l, s, m) in [(5, 3, 2)]:\n    #     diff, diff_ma, diff_ema = macd(cc, l, s, m)\n    #     ex += [diff]\n\n    # ex += [cdp(cc)]\n\n    # for (a, b) in [(4, 2), (8, 4), (12, 6)]:\n    #     mtm_value, mtma = mtm(cc, a, b)\n    #     ex += [mtm_value, mtma]\n\n    # for a in [10, 15]:\n    #     vr_value = vr(cc, a)\n    #     ex += [vr_value]\n\n\n    return ex\n\n\n# open,close,high,low,volume\ndef extend(key, v):\n    for i in [2, 3, 4, 5, 6]:\n        v[i] = map(float, v[i])\n    rate = map(lambda x, y: 0 if x == 0 else y / x, v[3][1:], v[3][:-1]) + [0]\n    v_rate = map(lambda x, y: 0 if x == 0 else y / x, v[6][1:], v[6][:-1]) + [0]\n\n    # ex = genEx(v)\n    # ex = []\n\n    work_day = range(len(rate), 0, -1)\n\n    buy = [-1.0] + v[2][:-1]\n    sell = [-1.0, -1.0] + v[3][:-2]\n\n    tgt = map(lambda x, y: y / x, buy, sell)\n    v = v + [rate, v_rate, tgt]\n    v = map(lambda x: map(str, x), v)\n\n    # work_day = map(str, work_day)\n    # aux = [v[0], work_day, v[4], v[5], v[6], v[7], v[8], v[15], v[16], v[18]]\n    # for i in range(len(aux)):\n    #     aux[i] = aux[i][:200]\n    return v\n","sub_path":"src/format3.py","file_name":"format3.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"475581008","text":"\n\nfrom xai.brain.wordbase.nouns._syllogism import _SYLLOGISM\n\n#class header\nclass _SYLLOGISMS(_SYLLOGISM, ):\n\tdef __init__(self,): \n\t\t_SYLLOGISM.__init__(self)\n\t\tself.name = \"SYLLOGISMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"syllogism\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_syllogisms.py","file_name":"_syllogisms.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"606582799","text":"#!/usr/local/bin/python3\n\nimport os\nimport pickle\nimport io\n\npath = '/Users/tianya/dev/codes/myGihub/MyPython/learn/serialization'\npath = path + '/backup1'\n\nd = dict(name='Bob', age=20, score=88)\n\nf = open(path, 'wb')\npickle.dump(d, f)\nf.close()\n\nf = open(path, 'rb')\nd1 = pickle.load(f)\nf.close()\n\nprint(d1)\n","sub_path":"python/learn/serialization/TestPickle.py","file_name":"TestPickle.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"34679184","text":"from datadog import initialize, api\nimport os\nimport env\n\nos.environ[\"DD_API_KEY\"] = env.DD_API_KEY_EU\nos.environ[\"DD_APP_KEY\"] = env.DD_APP_KEY_EU\n\noptions = {\n    'api_key': os.environ[\"DD_API_KEY\"],\n    'app_key': os.environ[\"DD_APP_KEY\"],\n    'api_host': \"https://api.datadoghq.eu\"\n}\n\ninitialize(**options)\n\nmonitor_options = {\n    \"notify_no_data\": True,\n    \"no_data_timeframe\": 20\n}\n\ntags = [\"test:richard\", \"app:webserver\", \"frontend\"]\n\ntry:\n    apiResult = api.Monitor.create(\n        type=\"metric alert\",\n        query=\"avg(last_5m):sum:system.net.bytes_rcvd{host:host0} > 100\",\n        name=\"# Bytes received on host0\",\n        message=\"We may need to add web hosts if this is consistently high.\",\n        tags=tags,\n        
options=monitor_options)\n    print(apiResult)\nexcept Exception as e:\n    print(\"Error:\", e)\n","sub_path":"create_Monitor_python_EU.py","file_name":"create_Monitor_python_EU.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"400717122","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.utils.translation import gettext as trans\n\nfrom recrutase.importacao.models import Importar\n\nimport os\n\nclass FormImportacao(forms.ModelForm):\n    arquivo = forms.FileField(label=trans('Arquivo CSV'))\n    \n    class Meta:\n        fields = ('arquivo',) \n        model = Importar\n    \n    def clean_arquivo(self):\n        iArquivo= str(self.cleaned_data[\"arquivo\"])\n        if iArquivo in ['', ' ', None, 'None']:\n            return self.cleaned_data[\"arquivo\"]\n        iExtensaoLista= os.path.splitext(iArquivo)\n        iExtensao= iExtensaoLista[len(iExtensaoLista)-1]\n        iExtensao = iExtensao.lower()\n        if iExtensao not in ['.csv',]:\n            raise forms.ValidationError(\"Formato não permitido, é necessário que o arquivo esteja em CSV!\")\n        else:\n            return self.cleaned_data[\"arquivo\"] \n    \n    def __init__(self, *args, **kwargs):\n        super(FormImportacao, self).__init__(*args, **kwargs)\n        self.fields['arquivo'].required = True\n        self.fields['arquivo'].error_messages['required'] = trans(u'O campo arquivo é obrigatório') \n    \nclass FormColunas(forms.Form): \n    \n    def __init__(self, *args, **kwargs):\n        iListaColunas = []\n        iListaColunas.append((0, 'selecione'))\n        iLista = []\n        for i, iValor in enumerate(kwargs.pop('iListaColunas')):\n            iLista.append((i+1, iValor))\n        iListaColunas = iListaColunas + iLista\n        super(FormColunas, self).__init__(*args, **kwargs)\n        iLista = []\n        iLista.append((1, 'CPF'))\n        iLista.append((2, 'Nome'))\n        iLista.append((3, 'Sobrenome'))\n        iLista.append((4, 'Email'))\n        iLista.append((5, 'Telefone'))\n        iLista.append((6, 'Celular'))\n        iLista.append((7, 'Endereco'))\n        iLista.append((8, 'Cidade'))\n        iLista.append((9, 'UF'))\n        iLista.append((10, 'Nascimento'))\n        for i, iColuna in enumerate(iLista):\n            self.fields[iColuna[1].lower()] = forms.ChoiceField(label='%s'%str(iColuna[1]))\n            self.fields[iColuna[1].lower()].choices = iListaColunas\n            if i in [1,2,3,5,8,9]:\n                self.fields[iColuna[1].lower()].required = True\n                self.fields[iColuna[1].lower()].label = '%s*' % str(iColuna[1])\n            else:\n                self.fields[iColuna[1].lower()].required = False\n                self.fields[iColuna[1].lower()].label = '%s' % str(iColuna[1])\n","sub_path":"PyProject_Recrutase/recrutase/importacao/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"501386169","text":"import argparse\nimport sys\nfrom datetime import datetime\nimport time\nimport os\nfrom tqdm import trange\nimport math\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import deque\n\nimport gc\ntry:\n\tfrom manta import *\nexcept ImportError:\n\tpass\n\nimport sys,inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir)\n\nfrom scene_storage import *\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--log_dir\", type=str, default='data/liquid{}-{}-{}_pos_size{}_f{}')\n\nparser.add_argument(\"--num_param\", type=int, default=4)\nparser.add_argument(\"--path_format\", type=str, default='%d_%d.npz')\nparser.add_argument(\"--screenshot_path_format\", type=str, 
default='%d_%d.jpg')\n\nparser.add_argument(\"--p0\", type=str, default='scenes')\nparser.add_argument(\"--p1\", type=str, default='frames')\nparser.add_argument(\"--p2\", type=str, default='src_x_pos')\nparser.add_argument(\"--p3\", type=str, default='src_radius')\n\nnum_s = 200\nnum_f = 600\nnum_sim = num_s*num_f\n\nparser.add_argument(\"--num_src_x_pos\", type=int, default=num_f)\nparser.add_argument(\"--min_src_x_pos\", type=float, default=0.2)\nparser.add_argument(\"--max_src_x_pos\", type=float, default=0.8)\nparser.add_argument(\"--num_src_radius\", type=int, default=num_f)\nparser.add_argument(\"--min_src_radius\", type=float, default=0.04)\nparser.add_argument(\"--max_src_radius\", type=float, default=0.08)\n\nparser.add_argument(\"--min_scenes\", type=int, default=0)\nparser.add_argument(\"--max_scenes\", type=int, default=num_s-1)\nparser.add_argument(\"--num_scenes\", type=int, default=num_s)\nparser.add_argument(\"--min_frames\", type=int, default=0)\nparser.add_argument(\"--max_frames\", type=int, default=num_f-1)\nparser.add_argument(\"--num_frames\", type=int, default=num_f)\nparser.add_argument(\"--num_simulations\", type=int, default=num_sim)\n\nparser.add_argument(\"--resolution_x\", type=int, default=32)\nparser.add_argument(\"--resolution_y\", type=int, default=24)\nparser.add_argument(\"--resolution_z\", type=int, default=1)\nparser.add_argument(\"--gravity\", type=float, default=-1e-3)\nparser.add_argument(\"--radius_factor\", type=float, default=1)\nparser.add_argument(\"--min_particles\", type=int, default=2)\nparser.add_argument(\"--bWidth\", type=int, default=1)\nparser.add_argument(\"--open_bound\", type=bool, default=False)\nparser.add_argument(\"--time_step\", type=float, default=0.5)\nparser.add_argument(\"--accuracy\", type=float, default=1e-3)\n\nparser.add_argument(\"--src_y_pos\", type=float, default=0.6)\nparser.add_argument(\"--basin_y_pos\", type=float, default=0.2)\n\nparser.add_argument('--output_images', action='store_true')\nparser.add_argument('--show_gui', action='store_true')\nparser.add_argument('--dont_delete_images', action='store_true')\n\nargs = parser.parse_args()\nargs.log_dir = args.log_dir.format(args.resolution_x, args.resolution_y, args.resolution_z, args.num_scenes, args.num_frames)\nargs.log_dir = args.log_dir if args.resolution_z <= 1 else args.log_dir + \"_3d\"\nargs.max_scenes = args.num_scenes - 1\nargs.max_frames = args.num_frames - 1\nargs.num_simulations = args.num_scenes * args.num_frames\nargs.num_src_x_pos = args.num_frames\nargs.num_src_radius = args.num_frames\n\nis_3d = args.resolution_z > 1\ndont_delete_images = args.dont_delete_images\n\ndef main():\n\tfield_type = ['v', 'l']\n\tprepare_simulation_directory(args, field_type)\n\n\tp1_space = np.linspace(args.min_src_x_pos, \n\t\t\t\t\t\t args.max_src_x_pos,\n\t\t\t\t\t\t int(1.0 + math.sqrt(args.num_scenes)) )\n\tp2_space = np.linspace(args.min_src_radius,\n\t\t\t\t\t\t args.max_src_radius,\n\t\t\t\t\t\t int(1.0 + math.sqrt(args.num_scenes)) )\n\tp_list = np.array(np.meshgrid(p1_space, p2_space)).T.reshape(-1, 2)\n\n\t# create solver\n\tm = initialize_manta(args)\n\n\tgravity = vec3(0, args.gravity, 0)\n\n\tv_ = np.zeros([m.res_z, m.res_y, m.res_x, 3], \tdtype=np.float32)\n\tl_ = np.zeros([m.res_z, m.res_y, m.res_x], \t\tdtype=np.float32)\n\n\t# flip\n\tm.velOld \t= m.s.create(MACGrid)\n\tm.tmpVec3 \t= m.s.create(VecGrid)\n\tm.pp \t\t= m.s.create(BasicParticleSystem) \n\tm.pVel \t\t= m.pp.create(PdataVec3)\n\t# acceleration data for particle nbs\n\tm.pindex \t= 
m.s.create(ParticleIndexSystem) \n\tm.gpi \t\t= m.s.create(IntGrid)\n\n\tprint('start generation')\n\tsim_id = 0\n\tv_range = [np.finfo(np.float).max, np.finfo(np.float).min]\n\tl_range = [np.finfo(np.float).max, np.finfo(np.float).min]\n\n\tp0_list = []\n\tp1_list = []\n\tfor i in trange(args.num_scenes, desc='scenes'):\n\t\tp = p_list[i]\n\n\t\tp0_deq = deque([-1]*args.num_frames, args.num_frames)\n\t\tp1_deq = deque([-1]*args.num_frames, args.num_frames)\n\n\t\tstart_time = time.time()\n\t\t\n\t\tm.flags.initDomain(boundaryWidth=args.bWidth)\n\t\tif args.open_bound:\n\t\t\tsetOpenBound(m.flags, args.bWidth,'xXyY', FlagOutflow|FlagEmpty)\n\n\t\tm.vel.clear()\n\t\tm.pressure.clear()\n\t\t\n\t\tm.velOld.clear()\n\t\tm.tmpVec3.clear()\n\t\n\t\tm.pp.clear()\n\t\tm.pVel.clear()\n\n\t\tfluidBasin = Box(parent=m.s, p0=m.gs*vec3(0,0,0), p1=m.gs*vec3(1.0,args.basin_y_pos,1.0)) # basin\n\t\tdropCenter = vec3(p[0],args.src_y_pos,0.5)\n\t\tdropRadius = p[1]\n\t\tfluidDrop = Sphere(parent=m.s, center=m.gs*dropCenter, radius=m.gs.x*dropRadius)\n\t\tphi = fluidBasin.computeLevelset()\n\t\tphi.join(fluidDrop.computeLevelset())\n\n\t\tm.flags.updateFromLevelset(phi)\n\t\tsampleLevelsetWithParticles(phi=phi, flags=m.flags, parts=m.pp, discretization=2, randomness=0.05)\n\n\t\tfluidVel = Sphere(parent=m.s, center=m.gs*dropCenter, radius=m.gs.x*(dropRadius+0.05))\n\t\tfluidSetVel = vec3(0,-1,0)\n\t\t\n\t\t# set initial velocity\n\t\tfluidVel.applyToGrid(grid=m.vel, value=fluidSetVel)\n\t\tmapGridToPartsVec3(source=m.vel, parts=m.pp, target=m.pVel)\n\n\t\tfor t in range(args.num_frames):\n\t\t\t# FLIP \n\t\t\tm.pp.advectInGrid(flags=m.flags, vel=m.vel, integrationMode=IntRK4, deleteInObstacle=False)\n\t\t\t# make sure we have velocities throughout the liquid region\n\t\t\tmapPartsToMAC(vel=m.vel, flags=m.flags, velOld=m.velOld, parts=m.pp, partVel=m.pVel, weight=m.tmpVec3) \n\t\t\textrapolateMACFromWeight(vel=m.vel, distance=2, weight=m.tmpVec3) # note, tmpVec3 could be free'd now...\n\t\t\tmarkFluidCells(parts=m.pp, flags=m.flags)\n\n\t\t\t# create approximate surface level set, resample particles\n\t\t\tgridParticleIndex(parts=m.pp , flags=m.flags, indexSys=m.pindex, index=m.gpi)\n\t\t\tunionParticleLevelset(m.pp, m.pindex, m.flags, m.gpi, phi, args.radius_factor) \n\t\t\tresetOutflow(flags=m.flags, parts=m.pp, index=m.gpi, indexSys=m.pindex) \n\t\t\tcopyGridToArrayLevelset(target=l_, source=phi)\n\t\t\t\n\t\t\t# extend levelset somewhat, needed by particle resampling in adjustNumber\n\t\t\textrapolateLsSimple(phi=phi, distance=4, inside=True); \n\n\t\t\t# forces & pressure solve\n\t\t\taddGravity(flags=m.flags, vel=m.vel, gravity=gravity)\n\t\t\tsetWallBcs(flags=m.flags, vel=m.vel)\n\t\t\tsolvePressure(flags=m.flags, vel=m.vel, pressure=m.pressure, cgAccuracy=args.accuracy, phi=phi)\n\t\t\tsetWallBcs(flags=m.flags, vel=m.vel)\n\n\t\t\t# set source grids for resampling, used in adjustNumber!\n\t\t\tm.pVel.setSource(m.vel, isMAC=True)\n\t\t\tadjustNumber(parts=m.pp, vel=m.vel, flags=m.flags, minParticles=args.min_particles, maxParticles=2*args.min_particles, phi=phi, radiusFactor=args.radius_factor)\n\n\t\t\t# save before extrapolation\n\t\t\tcopyGridToArrayMAC(target=v_, source=m.vel)\n\n\t\t\t# make sure we have proper velocities\n\t\t\textrapolateMACSimple(flags=m.flags, vel=m.vel, distance=4)\n\t\t\tflipVelocityUpdate(vel=m.vel, velOld=m.velOld, flags=m.flags, parts=m.pp, partVel=m.pVel, flipRatio=0.97)\n\t\t\t\n\t\t\tp0_deq.append(p[0])\n\t\t\tp1_deq.append(p[1])\n\n\t\t\tparam_ = [list(p0_deq), 
list(p1_deq)]\n\n\t\t\t# Store fields to disk\n\t\t\tv_range = save_npz(v_[...,:3 if is_3d else 2], v_range, 'v', i, t, param_, args)\n\t\t\tl_range = save_npz(l_, l_range, 'l', i, t, param_, args)\n\n\t\t\tm.s.step()\n\n\t\t\tif args.output_images:\n\t\t\t\tscreenshot(m.gui, args.log_dir, t + i * args.num_frames, density=phi, scale=1.0)\n\n\t\t\tsim_id += 1\n\n\t\tp0_list.append(param_[0])\n\t\tp1_list.append(param_[1])\n\n\t\tgc.collect()\n\t\tduration = time.time() - start_time\n\n\tif args.output_images:\n\t\tconvert_sequence( os.path.join(args.log_dir, 'screenshots'), output_name=args.log_dir.rsplit(\"/\",1)[-1], file_format=\"%06d.jpg\" if m.gui else \"%06d.ppm\", delete_images=not dont_delete_images )\n\n\tn_path = os.path.join(args.log_dir, 'n.npz')\n\tnp.savez_compressed(n_path, nx=p0_list, nz=p1_list)\n\n\t# Store data range\n\tsave_range(v_range, \"v\", args)\n\tsave_range(l_range, \"l\", args)\n\n\tprint('Done')\n\n\nif __name__ == '__main__':\n main()","sub_path":"scene/experimental/liquid_pos_size.py","file_name":"liquid_pos_size.py","file_ext":"py","file_size_in_byte":8329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"11606007","text":"import re\nimport pandas as pd\n\n# read data\nwith open('results.md','r') as fh:\n\ttext = fh.read()\n\n# split sections and heuristic labels\nheuristics = re.findall(r'''\\#\\# (.+)''', text)\n\nchunks = re.split(r'''\\#\\# .+''', text)[1:]\nchunks = [re.findall(r'''Match .+\\n''', x) for x in chunks]\n\nsplit_chunks = [re.findall(r'''Match ([1-7]):\\s+(\\w+)\\s+vs\\s+(\\w+)\\s+Result: (\\d+) to (\\d+)''', \n ''.join(x)) for x in chunks]\n\n# create dataframes from sections\ncols = ['match','opp1','opp2','wins','losses']\nsplit_df = [pd.DataFrame(x, columns=cols) for x in split_chunks]\n\n\nfor heur,df in zip(heuristics, split_df):\n\tdf['heuristic'] = heur\n\ndata = pd.concat(split_df).reset_index(drop=True)\ndata = data[['heuristic'] + cols]\n\ndata['wins'] = data.wins.astype('int')\ndata['losses'] = data.losses.astype('int')\ndata['games'] = data.wins + data.losses\n\n# calculate statistics\nstats = data.groupby(['heuristic','opp1']).agg({'wins':sum, 'losses':sum})\nstats['percent'] = stats.wins.astype('float') / 1400\n\nprint(stats)","sub_path":"get_tournament_stats.py","file_name":"get_tournament_stats.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"88094504","text":"import os\r\nfrom PyQt5.QtGui import QImage\r\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal, pyqtSlot, QFile, QDate, QDir, QFileInfo\r\nimport statistics \r\n\r\nfrom Excell import Excel_Report\r\n\r\n'''\r\nClass: excelThread\r\n\tWorker thread to handle populating an excel sheet\r\n\r\nParameters: \r\n\tQThread - inherits QThread attributes\r\n'''\r\nclass excelThread(QThread):\r\n\tsendReportName = pyqtSignal(str, bool)\r\n\tsendOutput = pyqtSignal(str)\r\n\r\n\t'''\r\n\tFunction: __init__\r\n\t\tSets initial values\r\n\t'''\t\r\n\tdef __init__(self):\r\n\t\tQThread.__init__(self)\r\n\t\tself.reportPath = \"\"\r\n\t\tself.name = \"\"\r\n\t\tself.dataAnalyDIR = \"\"\r\n\t\tself.state = False\r\n\t\tself.DAstate = False\r\n\t\tself.datasheet_dict = {}\r\n\t\tself.excel = Excel_Report()\r\n\t\tself.protocolHeader = [\"Section\", \"Min\", \"Max\", \"Unit\", \"Value\", \"Result\", \"Comment\"]\r\n\t\tself.equipmentHeader = [\"Name\", \"Model\", \"ID\", \"Calibration ID\", \"Cal Due Date\"]\r\n\t\tself.toolHeader = 
[\"Name\", \"Version\"]\r\n\t\tself.materialHeader = [\"Name\", \"Serial Number\", \"Revision\", \"Firmware\", \"Software\"]\r\n\r\n\t'''\r\n\tFunction: setDataAnalysis\r\n\t\tSet worker thread to perform data analysis\r\n\r\n\tParameters: \r\n\t \tdataDIR - DIR of all reports for data analysis \r\n\t \tDAstate - set state to start worker thread\r\n\t'''\r\n\tdef setDataAnalysis(self, dataDIR, state):\r\n\t\tself.dataAnalyDIR = dataDIR\r\n\t\tself.DAstate = state\r\n\r\n\t'''\r\n\tFunction: setGenerateExcel\r\n\t\tSet worker thread to generate excel file\r\n\r\n\tParameters: \r\n\t \toutputDict - output dictionary to populate excel report \r\n\t \texportPath - report export network path\r\n\t \treportName - name for report\r\n\t \tstate \t - set state to start worker thread\r\n\t'''\r\n\tdef setGenerateExcel(self, outputDict, exportPath, reportName, state):\r\n\t\tself.datasheet_dict = outputDict\r\n\t\tself.reportPath = exportPath\r\n\t\tself.name = reportName\r\n\t\tself.state = state\r\n\t\t\r\n\t'''\r\n\tFunction: run\r\n\t\tThis function is started by.start() and runs the main portion of the code\r\n\t'''\r\n\tdef run(self):\r\n\t\tself.setPriority(QThread.HighestPriority)\r\n\r\n\t\t# ---------- Excel Report ----------\r\n\t\tif self.state:\r\n\t\t\t# create excel sheet and set header\r\n\t\t\topenFile = self.excel.startExcelSheet(\tself.reportPath, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.name, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.datasheet_dict.get('Serial Number'), \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.datasheet_dict.get('Protocol Name'))\r\n\r\n\t\t\t# row to start populating data\r\n\t\t\trow = 4\r\n\r\n\t\t\t# Equipment Used section\r\n\t\t\tif (len(self.datasheet_dict.get('Equipment')) > 0):\r\n\t\t\t\t\r\n\t\t\t\t# add header\r\n\t\t\t\trow = self.excel.addHeaderRow(row, 1, \"Equipment\", self.equipmentHeader)\r\n\r\n\t\t\t\t# add content\r\n\t\t\t\tfor i in self.datasheet_dict['Equipment']:\r\n\t\t\t\t\tif (i.get('Name') != \"\"):\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Name'), row, 1)\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Model'), row, 2)\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('ID'), row, 3)\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Cal ID'), row, 4)\r\n\r\n\t\t\t\t\t\t# color out of calibration equipment red\r\n\t\t\t\t\t\tif (i.get('Cal Due Date') != \"\"):\r\n\t\t\t\t\t\t\tdate = QDate.fromString(i.get('Cal Due Date'), \"MMM d, yyyy\") \r\n\t\t\t\t\t\t\tif (date < QDate.currentDate()):\r\n\t\t\t\t\t\t\t\tself.excel.colorCellFail(row, 5)\r\n\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Cal Due Date'), row, 5)\r\n\t\t\t\t\t\trow += 1\r\n\t\t\trow += 1\r\n\r\n\t\t\t# Tools section\r\n\t\t\tif (len(self.datasheet_dict.get('Tools')) > 0):\r\n\r\n\t\t\t\t# add header\r\n\t\t\t\trow = self.excel.addHeaderRow(row, 1, \"Tools\", self.toolHeader)\r\n\r\n\t\t\t\t# add content\r\n\t\t\t\tfor i in self.datasheet_dict['Tools']:\r\n\t\t\t\t\tif (i.get('Name') != \"\"):\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Name'), row, 1)\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Version'), row, 2)\r\n\t\t\t\t\t\trow += 1\r\n\t\t\trow += 1\r\n\r\n\t\t\t# Materials section\r\n\t\t\tif (len(self.datasheet_dict.get('Material')) > 0):\r\n\r\n\t\t\t\t# add header\r\n\t\t\t\trow = self.excel.addHeaderRow(row, 1, \"Materials\", self.materialHeader)\r\n\r\n\t\t\t\t# add content\r\n\t\t\t\tfor i in self.datasheet_dict['Material']:\r\n\t\t\t\t\tif (i.get('Name') != \"\"):\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Name'), row, 
1)\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Serial Number'), row, 2)\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Revision'), row, 3)\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Firmware'), row, 4)\r\n\t\t\t\t\t\tself.excel.writeExcelEntry(i.get('Software'), row, 5)\r\n\t\t\t\t\t\trow += 1\r\n\t\t\trow += 1\r\n\r\n\t\t\t# write protocol header\r\n\t\t\trow = self.excel.addHeaderRow(row, 1, \"Test Procedure\", self.protocolHeader)\r\n\r\n\t\t\t# populate excel report\r\n\t\t\tfor i in self.datasheet_dict[\"Procedure\"]:\r\n\t\t\t\tself.excel.writeExcelEntry(i.get('Section'), row, 1)\r\n\t\t\t\tself.excel.writeExcelEntry(i.get('Min'), row, 2)\r\n\t\t\t\tself.excel.writeExcelEntry(i.get('Max'), row, 3)\r\n\t\t\t\tself.excel.writeExcelEntry(i.get('Unit'), row, 4)\r\n\t\t\t\tif (i.get('Value')== \"\"):\r\n\t\t\t\t\tself.excel.writeExcelEntry('N/A', row, 5)\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.excel.writeExcelEntry(i.get('Value'), row, 5)\r\n\t\t\t\tself.excel.writeExcelEntry(i.get('Result'), row, 6)\r\n\r\n\t\t\t\t# color fails red\r\n\t\t\t\tif (i.get('Result') == 'F'):\r\n\t\t\t\t\tself.excel.colorCellFail(row, 6)\r\n\t\t\t\tself.excel.writeExcelEntry(i.get('Comment'), row, 7)\r\n\t\t\t\trow += 1\r\n\r\n\t\t\trow += 2\r\n\r\n\t\t\t# input last row\r\n\t\t\tself.excel.writeSignature(row)\r\n\r\n\t\t\t# save excel report/ emit error \r\n\t\t\ttry:\r\n\t\t\t\treportname = self.excel.SaveSheet(openFile)\r\n\t\t\t\tself.sendReportName.emit(reportname, True)\r\n\t\t\t\t\r\n\t\t\t# error is sheet is opened by user and trying to save\r\n\t\t\texcept PermissionError as e:\r\n\t\t\t\tself.sendReportName.emit(\"FAILED: Please Close Opened Excel Sheet\", False)\r\n\r\n\t\t\tself.state = False\r\n\r\n\t\t# ---------- Data Analysis ----------\r\n\t\tif self.DAstate:\r\n\t\t\t# put reports in folder in a local list\r\n\t\t\tDataAnalysisDIR = QDir(self.dataAnalyDIR)\r\n\t\t\treportList = DataAnalysisDIR.entryList(QDir.Files, QDir.Time)\r\n\t\t\treportDataList = []\r\n\r\n\t\t\t# Start data analysis sheet\r\n\t\t\toutputSheetName = self.excel.startDataAnalysis(self.dataAnalyDIR)\r\n\r\n\t\t\t# get base name to not parse\r\n\t\t\tbaseReportName = QFileInfo(outputSheetName)\r\n\t\t\t\r\n\t\t\t# iterate through each report and append result data dicts to a list\r\n\t\t\tfor report in reportList:\r\n\t\t\t\tif (report.endswith('.xlsx') and report != baseReportName.baseName() + '.xlsx'):\r\n\t\t\t\t\tinputReport = self.dataAnalyDIR + '/' + report\r\n\t\t\t\t\tresultDict = self.excel.parseReport(inputReport)\r\n\t\t\t\t\treportDataList.append(resultDict)\r\n\r\n\t\t\t# declare local variable for populating data analysis\r\n\t\t\tserNumList = []\r\n\t\t\trow = 3\r\n\t\t\tdataCol = 5\r\n\t\t\tnumTests = len(reportDataList[0][\"Section\"])\r\n\t\t\tnumReports = len(reportDataList)\r\n\r\n\t\t\t# add serial numbers in list for header\r\n\t\t\tfor i in range(0, numReports):\r\n\t\t\t\tserNumList.append(reportDataList[i].get(\"Serial Number\"))\r\n\r\n\t\t\t# write header to data analysis\r\n\t\t\trow = self.excel.addHeaderRow(row, dataCol, \"Serial Numbers\", serNumList)\r\n\t\t\tself.excel.addSingleHeader(row - 1, 4, \"Unit\")\r\n\t\t\tself.excel.addSingleHeader(row - 1, 3, \"Max\")\r\n\t\t\tself.excel.addSingleHeader(row - 1, 2, \"Min\")\r\n\t\t\tself.excel.addSingleHeader(row - 1, 1, \"Section\")\r\n\r\n\t\t\t# populate with report data\r\n\t\t\tfor test in range(0, numReports):\r\n\t\t\t\tfor i in range(0, numTests):\r\n\t\t\t\t\tself.excel.writeExcelEntry(reportDataList[test][\"Section\"][i], row + 
i, 1)\r\n\t\t\t\t\tself.excel.writeExcelEntry(reportDataList[test][\"Min\"][i], row + i, 2)\r\n\t\t\t\t\tself.excel.writeExcelEntry(reportDataList[test][\"Max\"][i], row + i, 3)\r\n\t\t\t\t\tself.excel.writeExcelEntry(reportDataList[test][\"Unit\"][i], row + i, 4)\r\n\t\t\t\t\tself.excel.writeExcelEntry(reportDataList[test][\"Value\"][i], row + i, dataCol + test)\r\n\r\n\t\t\t\t\t# color failed tests red\r\n\t\t\t\t\tif (reportDataList[test][\"Result\"][i] == \"F\"):\r\n\t\t\t\t\t\tself.excel.colorCellFail(row + i, dataCol + test)\r\n\r\n\t\t\t# add in Data Analysis columns\r\n\t\t\tstanDevCol \t= dataCol + numReports\r\n\t\t\tminCol \t\t= dataCol + numReports + 1\r\n\t\t\tmaxCol\t\t= dataCol + numReports + 2\r\n\t\t\theaderRow \t= row - 1\r\n\t\t\tcurrRow\t\t= row\r\n\t\t\ttestIndex \t= 0\r\n\r\n\t\t\tself.excel.addSingleHeader(headerRow, stanDevCol, \"Standard Deviation\")\r\n\t\t\tself.excel.addSingleHeader(headerRow, minCol, \"Min\")\r\n\t\t\tself.excel.addSingleHeader(headerRow, maxCol, \"Max\")\r\n\r\n\t\t\t# iterate all tests and get list of values of each test\r\n\t\t\ttestDataList = self.excel.getTestDataList(numTests, numReports, currRow, dataCol)\r\n\r\n\t\t\t# perform analysis\r\n\t\t\tfor test in testDataList:\r\n\r\n\t\t\t\tif ('N/A' in test):\r\n\t\t\t\t\tself.excel.writeExcelEntry(\"N/A\", currRow, stanDevCol)\r\n\t\t\t\t\tself.excel.writeExcelEntry(\"N/A\", currRow, minCol)\r\n\t\t\t\t\tself.excel.writeExcelEntry(\"N/A\", currRow, maxCol)\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tstandardDeviation = statistics.stdev(testDataList[testIndex])\r\n\t\t\t\t\tminimumVal = min(testDataList[testIndex])\r\n\t\t\t\t\tmaximumVal = max(testDataList[testIndex])\r\n\t\t\t\t\tself.excel.writeExcelEntry(standardDeviation, currRow, stanDevCol)\r\n\t\t\t\t\tself.excel.writeExcelEntry(minimumVal, currRow, minCol)\r\n\t\t\t\t\tself.excel.writeExcelEntry(maximumVal, currRow, maxCol)\r\n\r\n\t\t\t\t# increment current row and test index\r\n\t\t\t\tcurrRow += 1 \r\n\t\t\t\ttestIndex += 1\r\n\r\n\t\t\t# get standard deviation column\r\n\t\t\tstanDevData = self.excel.getDataColumn(numTests, row, stanDevCol)\r\n\r\n\r\n\t\t\tprint(stanDevData)\r\n\r\n\r\n\r\n\t\t\t# make bar graph --- UPDATE FUNCTION\r\n\t\t\tself.excel.createBarGraph(row, numTests, stanDevCol)\r\n\r\n\t\t\t# for i in range(0, numTests):\r\n\t\t\t# \tfor test in range(0, numReports):\r\n\t\t\t# \t\tprint(\"Test: \" + str(test))\r\n\t\t\t# \t\tprint(\"Result: \" + str(reportDataList[test][\"Result\"][i]))\r\n\t\t\t# \t\tprint(\"Value: \" + str(reportDataList[test][\"Value\"][i]))\r\n\r\n\t\t\t# save data analysis sheet/ emit error \r\n\t\t\ttry:\r\n\t\t\t\treportname = self.excel.SaveSheet(outputSheetName)\r\n\t\t\t\tself.sendOutput.emit(\"\")\r\n\t\t\t\tself.sendOutput.emit(\"Data Analysis Successful!\")\r\n\t\t\t\tself.sendOutput.emit(\"Saved As: \" + reportname)\r\n\r\n\t\t\t# error is sheet is opened by user and trying to save\r\n\t\t\texcept PermissionError as e:\t\r\n\t\t\t\tself.sendOutput.emit(\"\")\r\n\t\t\t\tself.sendOutput.emit(\"FAILED: Please Close Opened Excel Sheet\")\r\n\t\t\t\tself.sendOutput.emit(str(e))\r\n\r\n\t\t\tself.DAstate = False","sub_path":"excelThread.py","file_name":"excelThread.py","file_ext":"py","file_size_in_byte":9751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"69290535","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as 
plt\n\nclasses=[]\nclassesRange=[]\ntestData=[]\nmean=[]\nvariance=[]\ndimension=2\nconfusionMatClass=[]\nconfusionMatrix=[]\ncovarianceMatrix=np.zeros(shape=(dimension,dimension))\ncovarianceMatrixInv=np.zeros(shape=(dimension,dimension))\naverage_variance=0\n\ndef calcPrereq(filename):\n\tfile=open(filename)\n\tdata=[]\n\tfor line in file:\n\t\tnumber_strings=line.split()\n\t\tnumbers=[float(n) for n in number_strings]\n\t\tdata.append(numbers)\n\ttempClass=np.array(data)\n\ttempClassRange=[]\n\ttempClassRange.append(np.amin(tempClass,axis=0))\n\ttempClassRange.append(np.amax(tempClass,axis=0))\n\tdata_train=[data[i] for i in range(int(0.75*len(tempClass)))]\n\ttempTestData=[data[i] for i in range(int(0.75*len(tempClass)),len(tempClass))]\n\ttempClassTrain=np.array(data_train)\n\ttempMean=tempClassTrain.mean(axis=0)\n\ttempVariance=[0,0]\n\tfor j in range(len(tempMean)):\n\t\tsumj=0\n\t\tfor i in range(len(tempClassTrain)):\n\t\t\tsumj+=(tempClassTrain[i][j]-tempMean[j])*(tempClassTrain[i][j]-tempMean[j]);\n\t\ttempVariance[j]=sumj/len(tempClassTrain)\n\tclasses.append(tempClassTrain)\n\tmean.append(tempMean)\n\tvariance.append(tempVariance)\n\ttestData.append(tempTestData)\n\tclassesRange.append(tempClassRange)\n\ndef gi(x):\n\tval=[0 for i in range(len(classes))]\n\tfor i in range(len(classes)):\n\t\tval[i]=-1.0/2.0/average_variance;\n\t\tfirst_term=0;\n\t\tfor j in range(dimension):\n\t\t\tfirst_term+=(x[j]-mean[i][j])*(x[j]-mean[i][j])\n\t\tval[i]*=first_term\n\t\ttot=0\n\t\tfor j in range(len(classes)):\n\t\t\ttot+=len(classes[j])\n\t\tval[i]+=math.log(float(len(classes[i]))/tot)\n\treturn np.argmax(val)\n\ndef g(x,first,second):\n\tval=1.0/2.0/average_variance;\n\tfirst_term=0;\n\tfor i in range(dimension):\n\t\tfirst_term+=x[i]*(mean[second][i]-mean[first][i])\n\tfirst_term*=2\n\tsecond_term=0;\n\tfor i in range(dimension):\n\t\tsecond_term+=(mean[first][i]*mean[first][i])-(mean[second][i]*mean[second][i])\n\tval*=first_term+second_term\n\tval+=math.log(float(len(classes[first]))/len(classes[second]))\n\tif val<0:\n\t\treturn first\n\telse:\n\t\treturn second\n\ndef calcConfusion():\n\tglobal confusionMatrix\n\tconfusionMatrix=[[0 for i in range(len(classes))] for i in range(len(classes))]\n\tfor i in range(len(classes)):\n\t\tfor j in range(len(testData[i])):\n\t\t\tx=testData[i][j]\n\t\t\tret=gi(x)\n\t\t\tconfusionMatrix[ret][i]+=1\n\ndef calcConfusionClass(ind):\n\ttemp=[[0 for i in range(2)] for i in range(2)]\n\tfor j in range(len(classes)):\n\t\tfor i in range(len(testData[j])):\n\t\t\tx=testData[j][i]\n\t\t\tret=gi(x)\n\t\t\tif ind==j:\n\t\t\t\tif ret==ind:\n\t\t\t\t\ttemp[0][0]+=1\n\t\t\t\telse:\n\t\t\t\t\ttemp[1][0]+=1\n\t\t\telse: \n\t\t\t\tif ret==ind:\n\t\t\t\t\ttemp[0][1]+=1\n\t\t\t\telse:\n\t\t\t\t\ttemp[1][1]+=1\n\tconfusionMatClass.append(temp)\n\t\nprint( \"\\nThis program is a Bayes' Classifier assuming the case when Covariance Matrix = (sigma)^2*I and is the same for all classes.\\n\")\n\nprint( \"Enter which data you want to use : \")\nprint( \"1. Linearly Separable Data.\")\nprint( \"2. Non-linearly Separable Data.\")\nprint( \"3. Real World Data.\")\nchoice=int(input(\"Choice : \"))\n\nif(choice==1):\n\tcalcPrereq(\"../../data/ls_group2/Class1.txt\")\n\tcalcPrereq(\"../../data/ls_group2/Class2.txt\")\n\tcalcPrereq(\"../../data/ls_group2/Class3.txt\")\nelif(choice==2):\n\tcalcPrereq(\"../../data/nl_group2/Class1.txt\")\n\tcalcPrereq(\"../../data/nl_group2/Class2.txt\")\n\tcalcPrereq(\"../../data/nl_group2/Class3.txt\")\nelif(choice==3):\n\tcalcPrereq(\"../../data/rd_group2/Class1.txt\")\n\tcalcPrereq(\"../../data/rd_group2/Class2.txt\")\n\tcalcPrereq(\"../../data/rd_group2/Class3.txt\")\nelse:\n\tprint( \"Wrong input! Exiting.\\n\")\n\nchoices=['ls','nl','rd']\n\nfor i in range(len(classes)):\n\tfor j in range(dimension):\n\t\taverage_variance+=variance[i][j]\naverage_variance/=len(classes)*dimension\n\ncovarianceMatrix=average_variance*np.identity(dimension)\ncovarianceMatrixInv=np.asmatrix(covarianceMatrix).I\n\nprint( \"\\nThe average variance calculated for all classes comes out to be\",average_variance)\n\nprint( \"\\nThe mean and variance vectors for different classes are: \\n\")\nfor i in range(len(mean)):\n\tprint( \"Class \",i+1,\": Mean - \",mean[i],\" Var - \",variance[i])\n\nfor i in range(len(classes)):\n\tcalcConfusionClass(i)\n\ncalcConfusion()\n\nAccuracy=[]\nPrecision=[]\nRecall=[]\nFMeasure=[]\n\nprint( \"\\nThe Confusion Matrices for different classes are: \")\nfor i in range(len(classes)):\n\tprint( \"\\nConfusion Matrix for class\",i+1,\": \\n\")\n\tprint( np.asmatrix(confusionMatClass[i]))\n\ttp=confusionMatClass[i][0][0]\n\tfp=confusionMatClass[i][0][1]\n\tfn=confusionMatClass[i][1][0]\n\ttn=confusionMatClass[i][1][1]\n\taccuracy=float(tp+tn)/(tp+tn+fp+fn)\n\tprecision=float(tp)/(tp+fp)\n\trecall=float(tp)/(tp+fn)\n\tfMeasure=2*precision*recall/(precision+recall)\n\tprint( \"\\nClassification Accuracy for class\",i+1,\"is\",accuracy)\n\tprint( \"Precision for class\",i+1,\"is\",precision)\n\tprint( \"Recall for class\",i+1,\"is\",recall)\n\tprint( \"F-measure for class\",i+1,\"is\",fMeasure)\n\tAccuracy.append(accuracy),Precision.append(precision),Recall.append(recall),FMeasure.append(fMeasure)\n\navgAccuracy,avgPrecision,avgRecall,avgFMeasure=0,0,0,0\nfor i in range(len(classes)):\n\tavgAccuracy+=Accuracy[i]\n\tavgPrecision+=Precision[i]\n\tavgRecall+=Recall[i]\n\tavgFMeasure+=FMeasure[i]\navgAccuracy/=len(classes)\navgPrecision/=len(classes)\navgRecall/=len(classes)\navgFMeasure/=len(classes)\n\nprint( \"\\nThe Confusion Matrix of all classes together is: \\n\")\nprint( np.asmatrix(confusionMatrix))\nprint( \"\\nAverage classification Accuracy is\",avgAccuracy)\nprint( \"Average precision is\",avgPrecision)\nprint( \"Average recall is\",avgRecall)\nprint( \"Average F-measure is\",avgFMeasure)\n\nprint( \"\\nPlease wait for a minute or two while the program generates graphs...\")\n\ncolors=['b','g','r']\ncolorsTestData=['c','m','y']\n\nl=1\nf=[]\n\nf.append(plt.figure(l))\nl+=1\nminArr=[0 for i in range(dimension)]\nmaxArr=[0 for i in range(dimension)]\nfor i in range(dimension):\n\tminArr[i]=classesRange[0][0][i]\n\tmaxArr[i]=classesRange[0][1][i]\n\nfor i in range(len(classesRange)):\n\tfor j in range(dimension):\n\t\tif(minArr[j]>classesRange[i][0][j]):\n\t\t\tminArr[j]=classesRange[i][0][j]\n\t\tif(maxArr[i] Image:\n #today = datetime.today().astimezone() #requires python 3.6\n today = datetime.today()\n data = self.api.getSchedule(today)\n\n draw = ImageDraw.Draw(img)\n\n self._draw_header(img, today)\n\n 
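# flatten each day's schedule elements into a single list of shows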
#print(utils.parseTime(data['data'][0]['date']))\n\n shows = []\n\n for i in range(len(data['data'])):\n for s in data['data'][i]['elements']:\n shows.append(s)\n\n print(len(shows))\n\n pos = 0\n hasCurrent = False\n cnt = 3 if detail else 6\n for i in range(len(shows)):\n timeStart = utils.parseTime(shows[i]['timeStart'])\n timeEnd = utils.parseTime(shows[i]['timeEnd'])\n\n if timeEnd < today:\n continue\n\n if timeStart < today and timeEnd > today and not hasCurrent:\n print(shows[i]['title'], i)\n rbtv_printer.printCurrent(img, shows[i], timeStart, timeEnd, today, rbtv_config.fontSmall)\n hasCurrent = True # sometimes shows overlap a few minutes\n continue\n \n if not upcoming:\n break\n rbtv_printer.printUpcomming(img, shows[i], timeStart, pos, detail)\n\n pos += 1\n if pos > cnt:\n break\n return img\n","sub_path":"python3/rbtv/rbtv.py","file_name":"rbtv.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"300642215","text":"'''\nFunction:\n Image to Patch Embedding\nAuthor:\n Zhenchao Jin\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom ..normalization import BuildNormalization\n\n\n'''Image to Patch Embedding'''\nclass PatchEmbed(nn.Module):\n def __init__(self, in_channels=3, embed_dims=768, kernel_size=16, stride=16, padding=0, dilation=1, pad_to_patch_size=True, norm_cfg=None):\n super(PatchEmbed, self).__init__()\n self.embed_dims = embed_dims\n if stride is None: stride = kernel_size\n patch_size = kernel_size\n if isinstance(patch_size, int):\n patch_size = (patch_size, patch_size)\n self.patch_size = patch_size\n self.projection = nn.Conv2d(in_channels, embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)\n self.pad_to_patch_size = pad_to_patch_size\n if norm_cfg is not None:\n self.norm = BuildNormalization(norm_cfg['type'], (embed_dims, norm_cfg['opts']))\n else:\n self.norm = None\n '''forward'''\n def forward(self, x):\n H, W = x.shape[2], x.shape[3]\n if self.pad_to_patch_size:\n if H % self.patch_size[0] != 0:\n x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))\n if W % self.patch_size[1] != 0:\n x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1], 0, 0))\n x = self.projection(x)\n self.DH, self.DW = x.shape[2], x.shape[3]\n x = x.flatten(2).transpose(1, 2)\n if self.norm is not None:\n x = self.norm(x)\n return x","sub_path":"ssseg/modules/backbones/bricks/transformer/embed.py","file_name":"embed.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"360787884","text":"import argparse\nimport os\n\nimport textpreprocessor.create_vocabulary\n\nimport utils\n\ndef main(args):\n config = utils.Config()\n\n utils.mkdir(os.path.join(config.getpath(\"data\"), \"rstdt-vocab\"))\n\n filenames = os.listdir(os.path.join(config.getpath(\"data\"), \"rstdt\", \"renamed\"))\n filenames = [n for n in filenames if n.endswith(\".edus\")]\n filenames.sort()\n\n with open(os.path.join(config.getpath(\"data\"), \"rstdt\", \"tmp.preprocessing\", \"concat.edus.heads.deprel\"), \"w\") as f:\n for filename in filenames:\n deprels = utils.read_lines(os.path.join(config.getpath(\"data\"), \"rstdt\", \"renamed\", filename + \".heads\"),\n process=lambda line: line.split()[-1])\n for deprel in deprels:\n f.write(\"%s\\n\" % deprel)\n\n if args.with_root:\n special_words = [\"\"]\n else:\n special_words = []\n 
textpreprocessor.create_vocabulary.run(\n os.path.join(config.getpath(\"data\"), \"rstdt\", \"tmp.preprocessing\", \"concat.edus.heads.deprel\"),\n os.path.join(config.getpath(\"data\"), \"rstdt-vocab\", \"deprels.vocab.txt\"),\n prune_at=10000000,\n min_count=-1,\n special_words=special_words,\n with_unk=True)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--with_root\", action=\"store_true\")\n args = parser.parse_args()\n main(args)\n","sub_path":"preprocessing/build_deprel_vocabulary_rstdt.py","file_name":"build_deprel_vocabulary_rstdt.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"537001824","text":"\n\n#calss header\nclass _STATEROOM():\n\tdef __init__(self,): \n\t\tself.name = \"STATEROOM\"\n\t\tself.definitions = [u'a large room, for example in a castle or palace, used for formal or important occasions: ', u'a room where you sleep on a cruise ship (= a large ship like a hotel): ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_stateroom.py","file_name":"_stateroom.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"127316762","text":"#encoding:utf-8\n# Create your views here.\nfrom django.http import Http404\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom couching.models import PerfilUsuario, Carrera, Docencia, Couching\nfrom couching.forms import DocenciaFormulario\nfrom django.contrib import messages\n\n###############################################################################################################\n############################# Control de Usuarios ##########################################\n###############################################################################################################\ndef cerrar_sesion(request):\n logout(request)\n return HttpResponseRedirect('/login/')\n\ndef inicio_sesion(request):\n if request.method == 'POST':\n form = AuthenticationForm(request.POST)\n if form.is_valid:\n usuario = request.POST['usuario']\n passw = request.POST['password']\n access = authenticate(username=usuario, password=passw)\n if access is not None:\n login(request, access)\n return HttpResponseRedirect('/dash/')\n else:\n messages.add_message(request, messages.ERROR, u'El usuario y/o contraseña no son validos')\n return HttpResponseRedirect('/login/')\n else:\n if not request.user.is_authenticated():\n form = AuthenticationForm()\n else:\n return HttpResponseRedirect('/dash/')\n return render(request, 'login.html', {'formulario':form})\n\n###############################################################################################################\n\n@login_required(login_url='/login/')\ndef dashboard(request):\n perfil = PerfilUsuario.objects.get(usuario = request.user)\n request.session.set_expiry(600)\n return render(request, 'dashboard.html', {'perfil' : perfil})\n\n\n@login_required(login_url='/login/')\ndef 
lista_carreras(request):\n perfil = PerfilUsuario.objects.get(usuario = request.user)\n if perfil.tipo == 2:\n return render(request, 'lista_carreras.html', {'carreras' : Carrera.objects.all(), 'perfil' : perfil })\n elif perfil.tipo == 4:\n raise Http404()\n else:\n raise Http404()\n\n@login_required(login_url='/login/')\ndef docente_perfil(request, doc):\n perfil = PerfilUsuario.objects.get(usuario=User.objects.get(pk=doc))\n docencias = Docencia.objects.filter(docente=perfil.usuario)\n couch = Couching.objects.get(docentes_asignados=perfil.usuario)\n return render(request, 'docente_perfil.html', {'docencias' : docencias, 'perfil_docente' : perfil, 'perfil' : PerfilUsuario.objects.get(usuario = request.user), 'couch' : couch})\n\n@login_required(login_url='/login/')\ndef asignaciones(request):\n perfil = PerfilUsuario.objects.get(usuario = request.user)\n docentes = Couching.objects.get(couch = perfil.usuario).docentes_asignados.all()\n return render(request, 'couching.html', {'perfil' : perfil,'docentes' : docentes})\n\n@login_required(login_url='/login/')\ndef editar_docencia(request, id):\n perfil = PerfilUsuario.objects.get(usuario = request.user)\n docencia = get_object_or_404(Docencia, pk = id)\n form = DocenciaFormulario(request.POST or None, request.FILES, instance = docencia)\n if request.method == 'POST' and form.is_valid():\n form.save()\n return HttpResponseRedirect('/dash/')\n return render(request, 'docencia.html', {'perfil' : perfil, 'docencia' : docencia, 'form' : form })\n","sub_path":"sicam/couching/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"125375955","text":"from collections import defaultdict\n\nimport boto3\nfrom time import sleep, time\n\nfrom dataclasses import dataclass\n\nfrom logs.log import logger\nfrom kubernetes import client, config\n\n\n@dataclass(repr=False)\nclass Options:\n sqs_queue_url: str = \"\"\n sqs_queue_name: str = \"\"\n kubernetes_deployment: str = \"\"\n kubernetes_namespace: str = \"\"\n kubernetes_deployment_selector: str = \"\"\n aws_region: str = \"\"\n poll_period: int = 10\n scale_down_cool_down: int = 10\n scale_up_cool_down: int = 10\n scale_up_messages: int = 20\n scale_down_messages: int = 10\n max_pods: int = 10\n min_pods: int = 1\n\n\nclass SQSPoller:\n\n options = None\n sqs_client = None\n extensions_v1_beta1 = None\n last_message_count = None\n\n def __init__(self, options: Options):\n self.options = options\n self.sqs_client = boto3.client(\"sqs\", region_name=options.aws_region)\n\n if not self.options.sqs_queue_url:\n # derive the URL from the queue name\n self.options.sqs_queue_url = self.sqs_client.get_queue_url(\n QueueName=self.options.sqs_queue_name\n )[\"QueueUrl\"]\n\n config.load_incluster_config()\n self.extensions_v1_beta1 = client.ExtensionsV1beta1Api()\n self.last_scale_up_time = defaultdict(time)\n self.last_scale_down_time = defaultdict(time)\n\n def message_counts(self):\n response = self.sqs_client.get_queue_attributes(\n QueueUrl=self.options.sqs_queue_url,\n AttributeNames=[\"ApproximateNumberOfMessages\", \"ApproximateNumberOfMessagesNotVisible\"],\n )\n message_count = int(response[\"Attributes\"][\"ApproximateNumberOfMessages\"])\n invisible_message_count = int(\n response[\"Attributes\"][\"ApproximateNumberOfMessagesNotVisible\"]\n )\n return message_count, invisible_message_count\n\n def poll(self):\n message_count, invisible_message_count = self.message_counts()\n 
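# compare the current queue depth against the scale-up/scale-down thresholds for every matching deployment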
t = time()\n for deployment in self.deployments():\n name = deployment.metadata.name\n logger.info(\"Checking deployment %s\", name)\n if message_count >= self.options.scale_up_messages:\n if t - self.last_scale_up_time[name] > self.options.scale_up_cool_down:\n self.scale_up(deployment)\n self.last_scale_up_time[name] = t\n else:\n logger.debug(\"Waiting for scale up cooldown\")\n if message_count <= self.options.scale_down_messages:\n # special case - do not scale to zero unless there are no invisible messages\n if (\n invisible_message_count > 0\n and deployment.spec.replicas <= invisible_message_count\n ):\n logger.debug(\"Not scaling down because messages are still in-flight\")\n elif t - self.last_scale_down_time[name] > self.options.scale_down_cool_down:\n self.scale_down(deployment)\n self.last_scale_down_time[name] = t\n else:\n if deployment.spec.replicas > self.options.min_pods:\n logger.debug(\"Waiting for scale down cooldown\")\n\n # code for scale to use msg_count\n sleep(self.options.poll_period)\n\n def scale_up(self, deployment):\n if deployment.spec.replicas < self.options.max_pods:\n deployment.spec.replicas += 1\n logger.info(\"Scaling up to %d\" % deployment.spec.replicas)\n self.update_deployment(deployment)\n elif deployment.spec.replicas > self.options.max_pods:\n self.scale_down(deployment)\n else:\n logger.debug(\"Max pods reached\")\n\n def scale_down(self, deployment):\n if deployment.spec.replicas > self.options.min_pods:\n deployment.spec.replicas -= 1\n logger.info(\"Scaling down to %d\" % deployment.spec.replicas)\n self.update_deployment(deployment)\n elif deployment.spec.replicas < self.options.min_pods:\n self.scale_up(deployment)\n else:\n logger.debug(\"Min pods reached\")\n\n def deployments(self):\n logger.debug(\n \"loading deployments: %s from namespace: %s\",\n self.options.kubernetes_deployment or self.options.kubernetes_deployment_selector,\n self.options.kubernetes_namespace,\n )\n if self.options.kubernetes_deployment_selector:\n selector = self.options.kubernetes_deployment_selector\n else:\n selector = \"app={}\".format(self.options.kubernetes_deployment)\n logger.debug(\"Selector is %s\", selector)\n deployments = self.extensions_v1_beta1.list_namespaced_deployment(\n self.options.kubernetes_namespace, label_selector=selector\n )\n return deployments.items\n\n def update_deployment(self, deployment):\n # Update the deployment\n api_response = self.extensions_v1_beta1.patch_namespaced_deployment(\n name=deployment.metadata.name,\n namespace=self.options.kubernetes_namespace,\n body=deployment,\n )\n logger.debug(\"Deployment updated. 
status='%s'\" % str(api_response.status))\n\n def run(self):\n options = self.options\n logger.debug(\n \"Starting poll for %s every %d seconds\", options.sqs_queue_url, options.poll_period\n )\n while True:\n self.poll()\n\n\ndef run(options):\n \"\"\"\n poll_period is set as as part of k8s deployment env variable\n sqs_queue_url is set as as part of k8s deployment env variable\n \"\"\"\n SQSPoller(Options(**options.__dict__)).run()\n","sub_path":"sqs/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":5691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"92053108","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 24 08:11:05 2019\n\n@author: imad\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\nclass PDR(object):\n def __init__(self, X, y, classifier, resolution=0.02):\n \n \"\"\"\n A decission region plotter.\n \n Parameters\n ----------\n X : {array-like}, shape = [n_samples,n_features]\n Training dataset containing feature vectors.\n \n y : {array-like}, shape = [n_samples]\n Target class labels for the samples in X.\n \n classifier : object\n The classifier object.\n \n resolution : \n \n \n \n ----------\n \"\"\"\n \n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n \n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n \n # plot class samples\n for idx, cl in enumerate(np.unique(y)):\n a = (y == cl).ravel()\n xx = []\n yy = []\n for i in range(len(a)):\n \n if a.ndim == 2:\n if a[i, 0]:\n xx.append(X[i, 0])\n yy.append(X[i, 1])\n elif a.ndim == 1:\n if a[i]:\n xx.append(X[i, 0])\n yy.append(X[i, 1])\n else:\n print(\"Dimension of a not handled\")\n plt.scatter(x=xx, \n y=yy,\n alpha=0.8, \n c=colors[idx],\n marker=markers[idx], \n label=cl, \n edgecolor='black')\n \n \n","sub_path":"plot_decission_regions.py","file_name":"plot_decission_regions.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"33800864","text":"\nimport argparse\n\nfrom myhdl import (Signal, intbv, always_seq, always_comb)\n\nfrom rhea.build.boards import get_board\n\n\ndef icestick_blinky(led, clock, reset=None):\n \"\"\" simple icestick LED blink \"\"\"\n assert len(led) == 5\n\n maxcnt = int(clock.frequency)\n cnt = Signal(intbv(0, min=0, max=maxcnt))\n toggle = Signal(bool(0))\n\n @always_seq(clock.posedge, reset=None)\n def rtl():\n if cnt == maxcnt-1:\n toggle.next = not toggle\n cnt.next = 0\n else:\n cnt.next = cnt + 1\n\n @always_comb\n def rtl_assign():\n led.next[0] = toggle\n led.next[1] = not toggle\n for ii in range(2, 5):\n led.next[ii] = 0\n\n return rtl, rtl_assign\n\n\ndef build(args):\n brd = get_board('icestick')\n flow = brd.get_flow(top=icestick_blinky)\n flow.run()\n\n\ndef cliparse():\n parser = argparse.ArgumentParser()\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = 
cliparse()\n build(args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/boards/icestick/blinky.py","file_name":"blinky.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"638024566","text":"\"\"\"\n Feed of SMTP greetings from dataplane with IPs and ASN\n\"\"\"\nimport logging\nfrom datetime import datetime, timedelta\n\nimport pandas as pd\nfrom core.errors import ObservableValidationError\nfrom core.feed import Feed\nfrom core.observables import AutonomousSystem, Ip\n\n\nclass DataplaneSMTPGreet(Feed):\n \"\"\"\n Feed of SMTP greetings from dataplane with IPs and ASN\n \"\"\"\n\n default_values = {\n \"frequency\": timedelta(hours=2),\n \"name\": \"DataplaneSMTPGreet\",\n \"source\": \"https://dataplane.org/smtpgreet.txt\",\n \"description\": \"Entries below are records of source IP addresses that have been identified as SMTP clients issuing unsolicited HELO or EHLO commands.\",\n }\n\n def update(self):\n resp = self._make_request(sort=False)\n lines = resp.content.decode(\"utf-8\").split(\"\\n\")[68:-5]\n columns = [\"ASN\", \"ASname\", \"ipaddr\", \"lastseen\", \"category\"]\n df = pd.DataFrame([l.split(\"|\") for l in lines], columns=columns)\n\n for c in columns:\n df[c] = df[c].str.strip()\n df = df.dropna()\n df[\"lastseen\"] = pd.to_datetime(df[\"lastseen\"])\n if self.last_run:\n df = df[df[\"lastseen\"] > self.last_run]\n for count, row in df.iterrows():\n self.analyze(row)\n\n def analyze(self, item):\n context_ip = {\n \"source\": self.name,\n \"last_seen\": item[\"lastseen\"],\n \"date_added\": datetime.utcnow(),\n }\n\n try:\n ip = Ip.get_or_create(value=item[\"ipaddr\"])\n ip.add_context(context_ip, dedup_list=[\"date_added\"])\n ip.add_source(self.name)\n ip.tag(\"dataplane\")\n ip.tag(\"smtp\")\n ip.tag(\"scanning\")\n ip.tag(item[\"category\"])\n\n asn = AutonomousSystem.get_or_create(value=item[\"ASN\"])\n context_ans = {\"source\": self.name, \"name\": item[\"ASname\"]}\n asn.add_context(context_ans, dedup_list=[\"date_added\"])\n asn.add_source(self.name)\n asn.tag(\"dataplane\")\n asn.active_link_to(ip, \"AS\", self.name)\n except ObservableValidationError as e:\n logging.error(e)\n","sub_path":"plugins/feeds/public/dataplane_smtpgreet.py","file_name":"dataplane_smtpgreet.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"163228975","text":"\"\"\"Alter Reply Table rename column\n\nRevision ID: 4bd7c589e859\nRevises: b2de0aa95666\nCreate Date: 2018-12-09 20:32:01.371518\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '4bd7c589e859'\ndown_revision = 'b2de0aa95666'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('reply', sa.Column('mail', mysql.JSON(none_as_null=True), nullable=False))\n op.drop_column('reply', 'email')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('reply', sa.Column('email', mysql.JSON(), nullable=False))\n op.drop_column('reply', 'mail')\n # ### end Alembic commands ###\n","sub_path":"database/versions/4bd7c589e859_alter_reply_table_rename_column.py","file_name":"4bd7c589e859_alter_reply_table_rename_column.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"81542472","text":"import cv2 as cv\nimport numpy as np\nimport math\n\nfrom sklearn import datasets, svm, metrics\nfrom sklearn.model_selection import train_test_split\n\n\n\ndigits = datasets.load_digits()\nn_samples = len(digits.images)\ndata = digits.images.reshape((n_samples, -1)) \nclassifier = svm.SVC(gamma=0.001)\nX_train, _, y_train,_ = train_test_split(\n data, digits.target, test_size=1, shuffle=False)\nclassifier.fit(X_train, y_train)\ncv.startWindowThread()\ncap = cv.VideoCapture('sentry3.mkv')\nframes=1\nfourcc = cv.VideoWriter_fourcc(*'mp4v')\nout = cv.VideoWriter('output.mp4', fourcc, 10.0, (1440,810),True)\n\nwhile(frames<29):\n\n ret, frame = cap.read()\n cv.waitKey(1)\n frames=frames+1\nwhile (frames<179):\n ret, frame = cap.read()\n cv.waitKey(1)\n frames=frames+1\n frame2=frame.copy()\n Mask=cv.inRange(frame2,np.array([0, 0, 0]),np.array([50,50,50]))\n Mask=cv.bitwise_not(Mask)\n kernel = np.ones((5,5),np.uint8)\n\n dilation = cv.dilate(Mask,kernel,iterations = 1)\n erosion = cv.erode(dilation,kernel,iterations = 13)\n Mask=cv.bitwise_not(erosion)\n contours, hierarchy = cv.findContours(Mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n # frame4=np.zeros(frame.shape,dtype=np.int8)\n for i in contours:\n frame4=np.zeros(frame.shape,dtype=np.uint8)\n x1,y1,w1,h1= cv.boundingRect(i)\n \n rect=cv.minAreaRect(i)\n \n box=cv.boxPoints(rect)\n \n box=np.int0(box)\n centre=rect[0]\n cv.rectangle(frame2,(x1,y1),(x1+w1,y1+h1),(20, 255, 57),2)\n cv.drawContours(frame4, [box],0,(255,255,255),-1)\n # cv.drawContours(frame2, [box],0,(20, 255, 57),2)\n roi=cv.bitwise_and(frame,frame4)\n ekkaurmask=cv.inRange(roi,np.array([0, 0,200]),np.array([255,255,255]))\n ekkaurmask = cv.dilate(ekkaurmask,kernel,iterations = 15)\n frame1=cv.bitwise_and(roi,roi,mask=ekkaurmask)\n whitemask=cv.inRange(frame1,np.array([130, 130,130]),np.array([255,255,205]))\n \n contour, _ = cv.findContours(whitemask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n if len(contour)!=0:\n c = max(contour, key = cv.contourArea)\n x,y,w,h = cv.boundingRect(c)\n if(w>5 or h>5):\n font = cv.FONT_HERSHEY_COMPLEX \n imgs=frame[y:y+h, x:x+w]\n framee= cv.resize(imgs,(8,8),interpolation=cv.INTER_LINEAR)\n framee=cv.cvtColor(framee,cv.COLOR_BGR2GRAY)\n framee=framee/16\n test=np.asarray(framee,dtype=\"int32\")\n new=np.asarray(test,dtype=\"float32\")\n predicted = classifier.predict([new.reshape(-1)])\n if ((predicted[0]==1)or (predicted[0]==9) or (predicted[0]==7)):\n # cv.circle(frame2,, 7, (255,0,0), -1)\n \n cv.putText(frame2, 'Red 1', (x1,int (y1-4)), font,0.7, (20, 255, 57),2) \n if predicted[0]==2:\n cv.putText(frame2, 'Red 2', (x1,int (y1-4)), font,0.7, (20, 255, 57),2)\n else: \n print(box)\n # cv.drawContours(frame2, [box],0,(255,255,255),2)\n \n cv.imshow('lool',frame2)\n out.write(frame2)\n k = cv.waitKey(1) & 0xff\n if k == 27 : break\n ","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"636732039","text":"import os\nimport requests\nimport flask\nimport 
redis\nimport datetime\nimport json\n\nclass RedisProvider(object):\n def __init__(self, items: list=[]):\n self._items = items\n self.formatted_results = []\n \n \n def getLeaderboard(self) -> str:\n self.formatted_results = []\n #Connect to redis\n pool = redis.ConnectionPool(host=os.environ['REDIS_SERVER'], port=6379, db=0)\n r = redis.Redis(connection_pool=pool)\n \n # Run zrevrange from docs zrevrange(name, start, end, withscores=False, score_cast_func=)\n raw_results = r.zrevrange(str('score.quizscore:'+datetime.datetime.today().strftime('%Y-%m-%d')) , 0, 5, withscores=True) \n #Format results\n for score in raw_results:\n self.formatted_results.append([score[0].decode(\"utf-8\"), score[1]])\n \n\n return json.dumps(self.formatted_results), 200\n\n def getTodaysCurrentChampion(self) -> str:\n #Connect to redis\n pool = redis.ConnectionPool(host=os.environ['REDIS_SERVER'], port=6379, db=0)\n r = redis.Redis(connection_pool=pool)\n \n # Run zrevrange from docs zrevrange(name, start, end, withscores=False, score_cast_func=)\n results = r.zrevrange(str('score.quizscore:'+datetime.datetime.today().strftime('%Y-%m-%d')) , 0, 5, withscores=True) \n \n #Format results - Take winning player (first tuple) \n score = results[0]\n return \"Today's champion is : \\n User : \"+str(score[0].decode(\"utf-8\"))+ \" with Score : \"+str(score[1]), 200\n\n def setPlayerScore(self, productPayload) -> str:\n #Connect to redis\n pool = redis.ConnectionPool(host=os.environ['REDIS_SERVER'], port=6379, db=0)\n r = redis.Redis(connection_pool=pool)\n\n r.zadd('score.quizscore:'+datetime.datetime.today().strftime('%Y-%m-%d'),productPayload['score'], productPayload['userid'] )\n r.zadd('score.quizscore', productPayload['score'],productPayload['userid'] )\n\n return \"Success\", 201","sub_path":"services/RedisProvider.py","file_name":"RedisProvider.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"161014518","text":"import pickle\n\nvVar=pickle.load( open( \"SOURCE/_var.pick\", \"rb\" ) )\nvRoom=pickle.load( open( \"SOURCE/_room.pick\", \"rb\" ) )\n\naR=list(filter(lambda x: x.later==True, vRoom))\nbR=list(filter(lambda x: x.later==False, vRoom))\nvRoom=bR+aR\n\n# handling of the \"virtual command\" next\n# converting it into a goto to the roomname of the next room\nif len(vRoom)>1:\n\tfor RID in range(0,len(vRoom)):\n\t\tR=vRoom[RID]\n\t\tfor AID in range(0,len(R.vAct)):\n\t\t\tA=R.vAct[AID]\n\t\t\tfor CID in range(0,len(A.vComm)):\n\t\t\t\tC=A.vComm[CID]\n\t\t\t\tif C.name==\"next\":\n\t\t\t\t\tif (RID+1)>=(len(vRoom)):\n\t\t\t\t\t\tprint(\"ERR: can't refer to NEXT room\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tC.name=\"goto\"\n\t\t\t\t\t\tC.arg=vRoom[RID+1].roomname\n\t\t\t\t\t\tprint(C)\n\nprint(vVar)\n\nactLater=[]\n\n# instruction counter and the ip() function - for debugging only\ninstruction=0\n\n# this variable stores the resulting source code\ntotal=\"\"\n\nvTotal=[]\n\n\ndef quote(s):\n\treturn \"\\\"\"+s+\"\\\"\"\n\ndef skobe(a=\"\"):\n\treturn \"(\"+a+\")\"\n\ndef call(fname,a=None,b=None):\n\tglobal vTotal\n\t\n\tvTotal.append((fname,a,b))\n\t#~ if a!=None and b!=None:\n\t\t#~ vTotal.append((fname,a,b))\n\t\n\t#~ if a!=None and b==None:\n\t\t#~ vTotal.append((fname,a))\n\t#~ if a==None and b==None:\n\t\t#~ vTotal.append((fname))\n\n\ndef ip(val):\n\tglobal instruction\n\tinstruction+=val\n\ndef declvar(varname,val):\n\tif val==True:\n\t\tcall(\"byte1\",varname)\n\telse:\n\t\tcall(\"byte0\",varname)\n\t\n\tip(+1)\n\ndef 
rest():\n\tcall(\"rest\")\n\tip(+1)\n\t\ndef condjmp(s):\n\tcall(\"condjmp\",s)\n\tip(+1+3)\n\ndef lab(s):\n\tcall(\"label\",s)\n\ndef setName(s):\n\tcall(\"setname\",s)\n\tip(+1+3)\n\nimport hashlib\n\ndef room(R):\n\tlab(R.roomname)\n\tsetName(R.anchor)\n\n# generate the block of variables\nrest()\ncondjmp(\"mainprogram\")\nfor V in vVar:\n\tdeclvar(V,vVar[V])\nlab(\"mainprogram\")\nrest()\n\n\n\n\ndef onv(s):\n\tcall(\"onv\",s)\n\tip(+1+3)\n\t\ndef offv(s):\n\tcall(\"offv\",s)\n\tip(+1+3)\n\ndef condtext(CT):\n\tcall(\"condtxt\",CT.anchor)\n\tip(+1+3)\n\nmessXanchor={}\n\ndef condmes(CT):\n\tcall(\"condmes\",messXanchor[CT])\n\tip(+1+3)\n\ndef nop():\n\tcall(\"nop\")\n\tip(+1)\n\n\ndef interpCond(A):\n\trest()\n\tif A.trueVec==None and A.falseVec==None:\n\t\tonv(\"always\")\n\t\n\tfor ct in A.trueVec:\n\t\tonv(ct)\n\tfor ct in A.falseVec:\n\t\toffv(ct)\n\n\ndef condset(s):\n\tcall(\"condset\",s)\n\tip(+1+3)\n\t\ndef condunset(s):\n\tcall(\"condunset\",s)\n\tip(+1+3)\n\ndef interpComm(A):\n\tfor C in A.vComm:\n\t\tnam=C.name.strip()\n\t\tprint(nam)\n\t\tif \"goto\"==nam:\n\t\t\tcondjmp(C.arg)\n\t\tif \"nothing\"==nam:\n\t\t\tnop()\n\t\tif \"set\"==nam:\n\t\t\tcondset(C.arg)\n\t\tif \"unset\"==nam:\n\t\t\tcondunset(C.arg)\n\t\tif \"return\"==nam:\n\t\t\tcondret()\n\t\tif \"mes\"==nam:\n\t\t\tcondmes(C.arg)\n\n\n\ndef text(A):\n\tinterpCond(A)\n\tcondtext(A)\n\tinterpComm(A)\n\n\ndef condact(A):\n\tcall(\"condact\",A.anchor)\n\tip(+1+3)\n\ndef act(A):\n\tinterpCond(A)\n\tcondact(A)\n\t\n\tglobal actLater\n\tactLater.append(A)\n\n\ndef waitKey():\n\tcall(\"waitkey\")\n\tip(+1)\n\t\ndef condret():\n\tcall(\"condret\")\n\tip(+1)\n\ndef eq(s):\n\tcall(\"eq\",s)\n\tip(+1+3)\n\ndef end():\n\twaitKey()\n\t\n\tglobal actLater\n\tfor A in actLater:\n\t\trest()\n\t\teq(A.anchor)\n\t\tprint(A)\n\t\tinterpComm(A)\n\t\n\tactLater.clear()\n\n\n\n\n\ndef UUID():\n\timport os\n\t# generates random strings of the form f114b8379a7eebf2870a1ec9770c139a\n\treturn hashlib.md5(os.urandom(32)).hexdigest().lower()\n\nfor R in vRoom:\n\troom(R)\n\trest()\n\t\n\t# declare the texts in the room\n\tcondjmp(R.roomname+\"_decl\")\n\tfor T in (R.vText+R.vAct):\n\t\tcall(\"decl\",T.anchor,T.text)\n\t\tip(+len(T.text)+1)\n\t\n\t\n\t# declare the data for the mes command\n\tfor T in (R.vText+R.vAct):\n\t\tfor C in T.vComm:\n\t\t\tprint(dir(C))\n\t\t\tif C.name==\"mes\":\n\t\t\t\tmessXanchor[C.arg]=UUID()\n\t\t\t\tcall(\"decl\", messXanchor[C.arg] ,C.arg)\n\t\n\tcall(\"decl\",R.anchor,R.roomname)\n\tlab(R.roomname+\"_decl\")\n\t\t\n\tfor T in R.vText:\n\t\ttext(T)\n\t\t\t\n\tfor A in R.vAct:\n\t\tact(A)\n\n\tend()\n\nimport sys\nfor x in vTotal:\n\tsys.stdout.write(x[0]+\" \")\n\tif x[1]:\n\t\tsys.stdout.write(x[1]+\" \")\n\tif x[2]:\n\t\tsys.stdout.write(x[2]+\" \")\n\tprint(\"\")\n\nimport pickle\n\npickle.dump( vTotal, open( \"SOURCE/_quest.pick\", \"wb\" ) )\n\n","sub_path":"lib1.py","file_name":"lib1.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"528462085","text":"#coding:utf-8\r\nimport json\r\nimport sys\r\nsys.path.append(r'C:\Users\Administrator\PycharmProjects\myfirst_python\my_python_requests')\r\nfrom util.operation_xls import OperationXls\r\nfrom base.runmethod import RunMethod\r\nfrom data.get_data import GetData\r\nfrom jsonpath_rw import jsonpath,parse\r\nclass DependdentData:\r\n def __init__(self,case_id):\r\n self.case_id = case_id\r\n self.opera_xls = OperationXls()\r\n self.data = GetData()\r\n # fetch the whole row of data for the given case_id\r\n def get_case_line_data(self):\r\n rows_data = self.opera_xls.get_rows_data(self.case_id)\r\n return rows_data\r\n # run the dependency test case and get its result\r\n def run_dependent(self):\r\n run_method = RunMethod()\r\n row_num = self.opera_xls.get_row_num(self.case_id)\r\n request_data = self.data.get_data_for_json(row_num)\r\n method = self.data.get_request_method(row_num)\r\n url = self.data.get_request_url(row_num)\r\n res = run_method.run_main(method,url,request_data)\r\n return json.loads(res)\r\n # use the dependency key to extract the value from the dependency case's response, then return it\r\n def get_data_for_key(self,row):\r\n depend_data = self.data.get_depend_key(row)\r\n response_data = self.run_dependent()\r\n json_exe = parse(depend_data)\r\n madle = json_exe.find(response_data)\r\n return [math.value for math in madle][0]\r\n\r\nif __name__ == '__main__':\r\n res = \"orders.id\"\r\n # sample response document, used only for this standalone demo\r\n order = {\"orders\": {\"id\": 1}}\r\n json_exe = parse(res)\r\n madle = json_exe.find(order)\r\n print([math.value for math in madle][0])\r\n","sub_path":"dependent_data.py","file_name":"dependent_data.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"496014001","text":"#!/Users/11834/.conda/envs/Pytorch_GPU/python.exe\n# -*- coding: UTF-8 -*-\n'''=================================================\n@Project -> File :FWorks -> feature_generation\n@IDE :PyCharm\n@Date :2020/12/6 21:20\n=================================================='''\nimport os\nimport numpy as np\nfrom configparser import ConfigParser\nfrom Util.processing_pssm_msaTopsfm import Processing_PSSM_MSAToPSFM\n\nconfig = ConfigParser()\nconfig.read('DMVFL-RSA.config')\nHHBLITS_EXE = config.get('HHBLITS', 'HHBLITS_EXE')\nHHBLITS_DB = config.get('HHBLITS', 'HHBLITS_DB')\n\n\nclass FeaturesGeneration(object):\n def __init__(self, pro_name, sequence, result_path):\n seq_path = os.path.join(result_path, pro_name)\n with open(seq_path, \"w\") as f:\n f.write(\">\" + pro_name.strip() + \"\\n\" + sequence.strip())\n self.seq_path = seq_path\n self.pro_name = pro_name.strip()\n self.seq = sequence.strip()\n self.result_path = result_path\n self.msa_path = os.path.join(self.result_path, self.pro_name + \".a3m\")\n self.PSFM_path = os.path.join(self.result_path, self.pro_name + \".psfm\")\n self.PRSA_path = os.path.join(self.result_path, self.pro_name + \".temples\")\n self.PSS_path = os.path.join(self.result_path, self.pro_name + \".ss\")\n self.PSSM_path = os.path.join(self.result_path, self.pro_name + \".opssm\")\n\n def PSSM_PSS_generation(self):\n if os.path.exists(self.PSSM_path) and os.path.exists(self.PSS_path):\n pass\n else:\n PSSM_PSS_cmd = \"java -jar GeneratePSSM_PSS_PSA.jar \" + self.result_path + \" \" + self.seq_path + \" \" + str(\n 0) + \" \" + str(1)\n os.system(PSSM_PSS_cmd)\n return self.seq_path\n\n def msa_generation(self):\n msa_cmd = HHBLITS_EXE + ' -i ' + self.seq_path + ' -d ' + HHBLITS_DB + ' -n ' + str(5) + ' -e ' + str(\n 0.1) + ' -cov ' + str(80) + ' -id ' + str(90) + ' -oa3m ' + self.msa_path\n os.system(msa_cmd)\n\n def PSFM_generation(self):\n if os.path.exists(self.PSFM_path):\n print(\"PSFM already exists!\")\n elif os.path.exists(self.msa_path):\n print(\"MSA already exists!\")\n MSAToPSFM = Processing_PSSM_MSAToPSFM()\n PSFM = MSAToPSFM.NumericMSAToPSFM(self.msa_path)\n np.savetxt(self.PSFM_path, PSFM, fmt='%.04f')\n else:\n self.msa_generation()\n MSAToPSFM = Processing_PSSM_MSAToPSFM()\n PSFM = MSAToPSFM.NumericMSAToPSFM(self.msa_path)\n np.savetxt(self.PSFM_path, PSFM, 
fmt='%.04f')\n\n def Threading_based_PRSA(self, cutoff_threshold=0.5, iter_num=1):\n\n PRSA_cmd = \"java -jar JPSFMThreader.jar \" + self.pro_name + \" \" + self.seq + \" \" + str(\n cutoff_threshold) + \" \" + str(iter_num) + \" \" + self.PSFM_path + \" \" + self.PRSA_path\n os.system(PRSA_cmd)\n","sub_path":"feature_generation.py","file_name":"feature_generation.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"552567226","text":"import hashlib\nimport hmac\n\nfrom django.conf import settings\n\n\n__all__ = ['TemplateTaskError', 'TemplateTask', 'MultipleChoiceTask', 'Choice',\n 'QuestionTask', 'task_classes']\n\n\nclass TemplateTaskError(Exception):\n pass\n\n\nclass TemplateTask:\n def __init__(self, identifier):\n self.identifier = identifier\n\n def check_for_errors(self):\n raise NotImplemented\n\n def validate(self, values):\n raise NotImplemented\n\n def extract_values(self, user, scenario, form_values):\n raise NotImplemented\n\n def get_mac(self, user, scenario):\n mac_msg = 'templatetask:{}:{}:{}'.format(user.pk, scenario.pk, self.identifier)\n return hmac.new(settings.SECRET_KEY.encode(), mac_msg.encode(),\n hashlib.sha256).hexdigest()\n\n\nclass MultipleChoiceTask(TemplateTask):\n task_type = 'multiple_choice'\n\n def __init__(self, identifier, **kwargs):\n super().__init__(identifier)\n self.choices = {}\n\n def check_for_errors(self):\n if not self.choices:\n raise TemplateTaskError('Empty choices')\n\n def extract_values(self, user, scenario, form_values):\n values = {}\n for choice in self.choices.values():\n choice_mac = choice.get_mac(user, scenario, self.identifier)\n values[choice.name] = bool(form_values.get(choice_mac))\n return values\n\n def validate(self, values):\n for choice in self.choices.values():\n if values[choice.name] != choice.correct:\n return False\n return True\n\n def __repr__(self):\n return '{}(choices={!r})'.format(self.__class__.__name__, self.choices)\n\n\nclass SingleChoiceTask(TemplateTask):\n task_type = 'single_choice'\n\n def __init__(self, identifier, **kwargs):\n super().__init__(identifier)\n self.choices = {}\n\n def check_for_errors(self):\n if not self.choices:\n raise TemplateTaskError('Empty choices')\n\n num_correct = 0\n for choice in self.choices.values():\n if choice.correct:\n num_correct += 1\n if num_correct != 1:\n raise TemplateTaskError('Require exactly 1 correct answer.')\n\n def extract_values(self, user, scenario, form_values):\n values = {'answer': None}\n for choice in self.choices.values():\n choice_mac = choice.get_mac(user, scenario, self.identifier)\n if form_values.get('answer') == choice_mac:\n values['answer'] = choice.name\n break\n return values\n\n def validate(self, values):\n if values['answer'] is None:\n return False\n return self.choices[values['answer']].correct\n\n def __repr__(self):\n return '{}(choices={!r})'.format(self.__class__.__name__, self.choices)\n\n\nclass Choice:\n def __init__(self, name, correct=False, **kwargs):\n self.name = name\n self.correct = correct\n\n def get_mac(self, user, scenario, task_identifier):\n mac_msg = 'choice:{}:{}:{}:{}'.format(user.pk, scenario.pk, task_identifier, self.name)\n return hmac.new(settings.SECRET_KEY.encode(), mac_msg.encode(),\n hashlib.sha256).hexdigest()\n\n def __repr__(self):\n return '{}(name={!r}, correct={!r})'.format(self.__class__.__name__,\n self.name,\n self.correct)\n\n\nclass QuestionTask(TemplateTask):\n task_type = 'question'\n\n def 
__init__(self, identifier, case_sensitive=False, strip=True, **kwargs):\n super().__init__(identifier)\n self.answers = []\n self.case_sensitive = case_sensitive\n self.strip = strip\n\n def check_for_errors(self):\n if not self.answers:\n raise TemplateTaskError('No answers')\n if not isinstance(self.case_sensitive, bool):\n raise TemplateTaskError('Attribute \"case_sensitive\" must be of type bool')\n if not isinstance(self.strip, bool):\n raise TemplateTaskError('Attribute \"strip\" must be of type bool')\n\n def extract_values(self, user, scenario, form_values):\n return {'answer': form_values.get('answer', '')}\n\n def validate(self, values):\n answer = values['answer']\n if self.strip:\n answer = answer.strip()\n if not self.case_sensitive:\n answer = answer.lower()\n\n for expected_answer in self.answers:\n if self.strip:\n expected_answer = expected_answer.strip()\n if not self.case_sensitive:\n expected_answer = expected_answer.lower()\n if answer == expected_answer:\n return True\n return False\n\n def __repr__(self):\n return '{}(answers={!r}, case_sensitive={!r}, strip={!r})'.format(\n self.__class__.__name__, self.answers, self.case_sensitive, self.strip)\n\n\ntask_classes = {\n MultipleChoiceTask.task_type: MultipleChoiceTask,\n SingleChoiceTask.task_type: SingleChoiceTask,\n QuestionTask.task_type: QuestionTask\n}\n","sub_path":"insekta/insekta/scenarios/dsl/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"287606422","text":"from math import ceil\n\nfrom veriloggen import *\nfrom veriloggen.types.util import log2\n\n\n# pe_conf packt = {op_conf,id_pe,conf_data}\n\ndef make_control_conf(cgra_id, num_pe_io, num_cicle_wait_conf_finish):\n m = Module('cgra%d_control_conf' % cgra_id)\n\n clk = m.Input('clk')\n rst = m.Input('rst')\n start = m.Input('start')\n\n available_read = m.Input('available_read')\n req_rd_data = m.Output('req_rd_data')\n rd_data = m.Input('rd_data', 512)\n rd_data_valid = m.Input('rd_data_valid')\n\n conf_out_bus = m.OutputReg('conf_out_bus', 64)\n\n read_fifo_mask = m.OutputReg('read_fifo_mask', num_pe_io)\n write_fifo_mask = m.OutputReg('write_fifo_mask', num_pe_io)\n\n done = m.OutputReg('done')\n\n FSM_INIT_CTRL_IDLE = m.Localparam('FSM_INIT_CTRL_IDLE', 0)\n FSM_INIT_CTRL_INIT = m.Localparam('FSM_INIT_CTRL_INIT', 1)\n FSM_SEND_INIT_CONF_PE = m.Localparam('FSM_SEND_INIT_CONF_PE', 2)\n FSM_INIT_CTRL_WAIT_DATA = m.Localparam('FSM_INIT_CTRL_WAIT_DATA', 3)\n FSM_INIT_CTRL_REQ_DATA = m.Localparam('FSM_INIT_CTRL_REQ_DATA', 4)\n FSM_INIT_CONF_DONE = m.Localparam('FSM_INIT_CONF_DONE', 5)\n FSM_WAIT_ALL_CONF_FINISH = m.Localparam('FSM_WAIT_ALL_CONF_FINISH', 6)\n\n m.EmbeddedCode('')\n fsm_conf_ctrl = m.Reg('fsm_conf_ctrl', 3)\n fsm_conf_ctrl_next = m.Reg('fsm_conf_ctrl_next', 3)\n conf_req_data = m.Reg('conf_req_data')\n conf_cl = m.Reg('conf_cl', 512)\n qtd_conf = m.Reg('qtd_conf', 32)\n conf_data = m.Reg('conf_data', 64)\n send_conf = m.Reg('send_conf')\n conf_counter = m.Reg('conf_counter', 32)\n conf_counter_cl = m.Reg('conf_counter_cl', 4)\n wait_counter = m.Reg('wait_counter', int(ceil(log2(num_cicle_wait_conf_finish))) + 1)\n\n m.EmbeddedCode('')\n req_rd_data.assign(conf_req_data)\n\n m.Always(Posedge(clk))(\n If(rst)(\n fsm_conf_ctrl(FSM_INIT_CTRL_IDLE),\n fsm_conf_ctrl_next(FSM_INIT_CTRL_IDLE),\n conf_req_data(0),\n send_conf(0),\n conf_counter(0),\n conf_counter_cl(Int(8, conf_counter_cl.width, 10)),\n done(0),\n 
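# synchronous reset: the FIFO masks and the wait counter below are cleared here as well\n            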
read_fifo_mask(0),\n            write_fifo_mask(0),\n            wait_counter(0)\n        ).Else(\n            conf_req_data(0),\n            send_conf(0),\n            Case(fsm_conf_ctrl)(\n                When(FSM_INIT_CTRL_IDLE)(\n                    If(start)(\n                        fsm_conf_ctrl(FSM_INIT_CTRL_REQ_DATA),\n                        fsm_conf_ctrl_next(FSM_INIT_CTRL_INIT)\n                    )\n                ),\n                When(FSM_INIT_CTRL_INIT)(\n                    qtd_conf(conf_cl[0:32]),\n                    read_fifo_mask(conf_cl[32:32 + num_pe_io]),\n                    write_fifo_mask(conf_cl[64:64 + num_pe_io]),\n                    fsm_conf_ctrl(FSM_SEND_INIT_CONF_PE),\n                ),\n                When(FSM_SEND_INIT_CONF_PE)(\n                    If(conf_counter >= qtd_conf)(\n                        fsm_conf_ctrl(FSM_WAIT_ALL_CONF_FINISH)\n                    ).Else(\n                        If(conf_counter_cl < Int(8, conf_counter_cl.width, 10))(\n                            conf_data(conf_cl[0:64]),\n                            conf_cl(conf_cl[64:]),\n                            send_conf(1),\n                            conf_counter.inc(),\n                            conf_counter_cl.inc(),\n                        ).Else(\n                            conf_counter_cl(Int(0, conf_counter_cl.width, 10)),\n                            fsm_conf_ctrl(FSM_INIT_CTRL_REQ_DATA),\n                            fsm_conf_ctrl_next(FSM_SEND_INIT_CONF_PE)\n                        )\n                    )\n                ),\n                When(FSM_INIT_CTRL_REQ_DATA)(\n                    If(available_read)(\n                        conf_req_data(1),\n                        fsm_conf_ctrl(FSM_INIT_CTRL_WAIT_DATA)\n                    )\n                ),\n                When(FSM_INIT_CTRL_WAIT_DATA)(\n                    If(rd_data_valid)(\n                        conf_cl(rd_data),\n                        fsm_conf_ctrl(fsm_conf_ctrl_next),\n                    )\n                ),\n                When(FSM_WAIT_ALL_CONF_FINISH)(\n                    wait_counter.inc(),\n                    If(wait_counter > num_cicle_wait_conf_finish)(\n                        fsm_conf_ctrl(FSM_INIT_CONF_DONE)\n                    )\n                ),\n                When(FSM_INIT_CONF_DONE)(\n                    done(1)\n                )\n            )\n        )\n    )\n\n    m.Always(Posedge(clk))(\n        If(rst)(\n            conf_out_bus(0),\n        ).Else(\n            If(send_conf)(\n                conf_out_bus(conf_data),\n            ).Else(\n                conf_out_bus(0)\n            ),\n        )\n    )\n\n    return m\n","sub_path":"fdam-hw-generator/src/fdam_cgra/make_control_conf.py","file_name":"make_control_conf.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"161635963","text":"import urllib, urllib.request\nimport json\n\ntry:\n    import vim\nexcept ImportError:\n    print(\"No vim module available outside vim\")\n    pass\n\n\nimport openai\nfrom AUTH import *\n\nopenai.organization = ORGANIZATION_ID\nopenai.api_key = SECRET_KEY\nMAX_SUPPORTED_INPUT_LENGTH = 4096\nUSE_STREAM_FEATURE = True\nMAX_TOKENS_DEFAULT = 64\n\ndef complete_input_max_length(input_prompt, max_input_length=MAX_SUPPORTED_INPUT_LENGTH, stop=None, max_tokens=64):\n    input_prompt = input_prompt[-max_input_length:]\n    response = openai.Completion.create(engine='davinci-codex', prompt=input_prompt, best_of=1, temperature=0.5, max_tokens=max_tokens, stream=USE_STREAM_FEATURE, stop=stop)\n    return response\n\ndef complete_input(input_prompt, stop, max_tokens):\n    try:\n        response = complete_input_max_length(input_prompt, int(2.5 * MAX_SUPPORTED_INPUT_LENGTH), stop=stop, max_tokens=max_tokens)\n    except openai.error.InvalidRequestError:\n        response = complete_input_max_length(input_prompt, MAX_SUPPORTED_INPUT_LENGTH, stop=stop, max_tokens=max_tokens)\n        print('Using shorter input.')\n\n    return response\n\ndef get_max_tokens():\n    max_tokens = None\n    if vim.eval('exists(\"a:max_tokens\")') == '1':\n        max_tokens_str = vim.eval('a:max_tokens')\n        if max_tokens_str:\n            max_tokens = int(max_tokens_str)\n\n    if not max_tokens:\n        max_tokens = MAX_TOKENS_DEFAULT\n\n    return max_tokens\n\ndef delete_current_line_if_empty_and_stop_below_matches_stop_string(stop):\n    vim_buf = vim.current.buffer\n    row, col = vim.current.window.cursor\n    if row == len(vim_buf):\n        return\n    # Get the next non-empty line using get_first_line_below_cursor_with_text\n    next_line = get_first_line_below_cursor_with_text()\n    if next_line == stop:\n        if len(vim_buf[row-1]) == 0:\n            vim_buf[row-1:row] = []\n\ndef 
delete_empty_inserted_lines_if_stop_matches_stop_string(stop):\n    vim_buf = vim.current.buffer\n    row, col = vim.current.window.cursor\n    if row == len(vim_buf):\n        return\n    # Get the next non-empty line using get_first_line_below_cursor_with_text\n    next_line = get_first_line_below_cursor_with_text()\n    if next_line == stop:\n        while True:\n            if row >= len(vim_buf):\n                break\n            # Drop consecutive empty lines until a non-empty one is reached.\n            if len(vim_buf[row-1]) == 0:\n                vim_buf[row-1:row] = []\n            else:\n                break\n        if len(vim_buf[row-1]) == 0:\n            vim_buf[row-1:row] = []\n\ndef get_first_line_below_cursor_with_text():\n    vim_buf = vim.current.buffer\n    row, col = vim.current.window.cursor\n    while True:\n        if row == len(vim_buf):\n            return None\n        if len(vim_buf[row]) > 0:\n            return vim_buf[row]\n        row += 1\n\n\ndef create_completion(stop=None):\n    max_tokens = get_max_tokens()\n    vim_buf = vim.current.buffer\n    row, col = vim.current.window.cursor\n    # build the prompt from the text after the cursor followed by the text before it\n    input_prompt = '\\n'.join(vim_buf[row:])\n    input_prompt += '\\n'.join(vim_buf[:row-1])\n    input_prompt += '\\n' + vim_buf[row-1][:col]\n    if not stop:\n        stop = get_first_line_below_cursor_with_text()\n    response = complete_input(input_prompt, stop=stop, max_tokens=max_tokens)\n    write_response(response, stop=stop)\n\ndef write_response(response, stop):\n    vim_buf = vim.current.buffer\n    vim_win = vim.current.window\n    while True:\n        # TODO: Fix bug that causes Vim to freeze when arrow keys are used.\n        # Check if the user pressed any key.\n        if vim_win.cursor[0] > len(vim_buf):\n            return\n        if vim_win.cursor[0] == len(vim_buf) and vim_win.cursor[1] > len(vim_buf[-1]):\n            return\n        if vim.eval('getchar(0)') != '0':\n            return\n\n        if USE_STREAM_FEATURE:\n            single_response = next(response)\n        else:\n            single_response = response\n        completion = single_response['choices'][0]['text']\n        if single_response['choices'][0]['finish_reason'] is not None:\n            if stop == '\\n':\n                completion += '\\n'\n        row, col = vim.current.window.cursor\n        current_line = vim.current.buffer[row-1]\n        new_line = current_line[:col] + completion + current_line[col:]\n        if not USE_STREAM_FEATURE:\n            if new_line == '':\n                new_line = new_line\n            elif new_line[-1] == '\\n':\n                new_line = new_line[:-1]\n        new_lines = new_line.split('\\n')\n        new_lines.reverse()\n        if len(vim_buf) == row:\n            vim_buf.append('')\n\n        vim_buf[row-1] = None\n        cursor_pos_base = tuple(vim_win.cursor)\n        for row_i in range(len(new_lines)):\n            vim.current.buffer[row-1:row-1] = [new_lines[row_i]]\n\n            if new_line == '':\n                cursor_target_col = 0\n            elif new_line[-1] != '\\n':\n                cursor_target_col = len(new_lines[0])\n            else:\n                cursor_target_col = 0\n            vim_win.cursor = (cursor_pos_base[0] + row_i, cursor_target_col)\n\n        if not USE_STREAM_FEATURE:\n            break\n\n        # Flush the vim buffer.\n        vim.command(\"redraw\")\n        if USE_STREAM_FEATURE:\n            if single_response['choices'][0]['finish_reason'] is not None:\n                # delete_current_line_if_empty_and_stop_below_matches_stop_string(stop)\n                delete_empty_inserted_lines_if_stop_matches_stop_string(stop)\n                break\n\n\n","sub_path":"python/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"177193511","text":"import requests\nfrom config import settings\n\n\ndef get_news(ticker, num_of_articles=10):\n    res = requests.get(\n        f\"{settings.API_BASE_URL}/stable/stock/{ticker}/news/last/{num_of_articles}\",\n        params={'token': settings.MY_API_KEY})\n    data = res.json()\n\n    articles = []\n    for article in data:\n        
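# the decoded response is expected to be a list of article dicts; collect each one as-is\n        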
articles.append(article)\n    return articles\n\n\ndef get_tickers_news(tickers):\n    tickers_articles = []\n    for ticker in tickers:\n        tickers_articles.append(get_news(ticker, num_of_articles=10))\n    return tickers_articles\n","sub_path":"beatthemarket/blueprints/news/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"630219127","text":"T = int(input())\nls = [\"ZRO\", \"ONE\", \"TWO\", \"THR\", \"FOR\", \"FIV\", \"SIX\", \"SVN\", \"EGT\", \"NIN\"]\nfor tc in range(1, T+1):\n    num, N = input().split()\n    n = list(input().split())\n    cnt_ls = [0]*10\n    for j in n:\n        for i in range(10):\n            if ls[i] == j:\n                cnt_ls[i] += 1\n                break\n    print(f'#{tc}')\n    for i in range(10):\n        print((ls[i] + ' ') * cnt_ls[i], end=' ')\n    print()","sub_path":"0217/1221.GNS.py","file_name":"1221.GNS.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"391634531","text":"from data_lineage.graph.graph import Graph\nfrom data_lineage.parser.parser import parse as parse_single\nfrom data_lineage.visitors.dml_visitor import SelectSourceVisitor, SelectIntoVisitor, CopyFromVisitor\n\n\ndef parse(queries):\n    parsed = []\n    for query in queries:\n        parsed.append(parse_single(query.sql))\n\n    return parsed\n\n\ndef get_dml_queries(parsed):\n    queries = []\n    for node in parsed:\n        select_source_visitor = SelectSourceVisitor()\n        select_into_visitor = SelectIntoVisitor()\n        copy_from_visitor = CopyFromVisitor()\n\n        for visitor in [select_source_visitor, select_into_visitor, copy_from_visitor]:\n            node.accept(visitor)\n            if len(visitor.sources) > 0 and visitor.target is not None:\n                queries.append(visitor)\n                break\n\n    return queries\n\n\ndef create_graph(dml_queries):\n    graph = Graph()\n    graph.create_graph(dml_queries)\n\n    return graph\n","sub_path":"data_lineage/data_lineage.py","file_name":"data_lineage.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"525680575","text":"\"\"\"S-CARD to Death!\"\"\"\n\n__author__ = \"730249177\"\n\nplayer: str\npoints: int = 1\ncontinue_playing: bool = True\nCLUB: str = '\\u2663'\nSPADES: str = '\\u2660'\nDIAMOND: str = '\\u2666'\nHEART: str = '\\u2665'\nBLACK_HEART: str = '\\u2764\\uFE0F'\n\n\ndef main() -> None:\n    \"\"\"The program's entrypoint.\"\"\"\n    greet()\n    global points\n    global continue_playing\n    while continue_playing:\n        option1: int = int(input(\"Do you want to guess the color of a card \" + player + \"? 1 = Yes and 2 = No: \"))\n        if option1 == 1:\n            points = points + 5\n            color()\n        else:\n            option2: int = int(input(player + \", do you want to guess the number on the card? 1 = Yes or 2 = No: \"))\n            if option2 == 1:\n                points = points + 5\n                number(points)\n            else:\n                option3: int = int(input(\"Do you wish to end the game \" + player + \"? 1 = Yes or 2 = No: \"))\n                if option3 == 1:\n                    points = points + 1\n                    continue_playing = False\n                    print(\"The game has ended and you will now receive 10 years of bad luck. Thank you for playing S-CARD to Death! You have earned \" + str(points) + \" adventure points. \" + BLACK_HEART)\n                else:\n                    points = points + 5\n                    color()\n    return None\n\n\ndef greet() -> None:\n    \"\"\"Greeting the player.\"\"\"\n    global player\n    player = input(\"Enter your name: \")\n    print(player + \" are you ready to play S-CARD to Death? 
\" + CLUB + SPADES + DIAMOND + HEART)\n print(\"About this game. A very smart, talented, beautiful individual invented this game you are about to play, called S-CARD to Death. Here are the instructions for the game. You will choose a card from the stack. Now, I must inform you that the cards in this stack may be cursed. Be very careful of your guesses, as they could lead to XDANGERX! Continue playing to see how many adventure points you can earn! \") \n return None\n\n\ndef color() -> None:\n \"\"\"The color of the card is Red.\"\"\"\n global points\n global player \n global continue_playing\n response1 = int(input(\"Pick a color; 1 = Red, 2 = Black: \"))\n if response1 == 1:\n points = points + 5\n suit: int = int(input(\"Nice job \" + player + \"! You have earned 5 points. Now, guess the suit of the card. 1 = Hearts, 2 = Spades, 3 = Clubs, 4 = Diamonds. \"))\n from random import randint\n e = randint(1, 4)\n if suit > 0:\n while e > suit:\n points = points + 5\n e = e - 1\n print(f\"Good work {player}! You have earned 5 points. Final question. \")\n number(points)\n else:\n points = points + 1\n print(\"Sorry! That answer was incorrect. The game has ended and you will be single forever. Your total adventure points earned are \" + str(points))\n keep_playing() \n return None\n\n\ndef number(points: int) -> int:\n \"\"\"The number on the card is lower than 6.\"\"\"\n points = points\n global player \n global continue_playing\n y: int = int(input(\"Guess if the number is higher or lower than 6: 1 = Higher, 2 = Lower: \"))\n if y == 2:\n points = points + 5\n print(\"Wow \" + player + \", you rock! You have earned 5 points. \")\n print(\"You have won the game \" + player + \" !!! Congratulations! \" + CLUB + SPADES + HEART + DIAMOND + \" Thank you for playing S-CARD to Death. Your total adventure points earned are \" + str(points))\n else:\n points = points + 1\n print(\"Sorry! That answer was incorrect. The game has ended and you will be single forever. Your total adventure points earned are \" + str(points))\n return points\n\n\ndef keep_playing() -> None:\n \"\"\"Do you want to continue playing the game?\"\"\"\n global continue_playing \n question1: int = int(input(\"Do you want to continue playing the game? 1 = Yes and 2 = No: \"))\n if question1 == 1:\n main()\n else:\n continue_playing = False\n print(\"Goodbye. 
\")\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"projects/cyoa.py","file_name":"cyoa.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"481761672","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\n #from google.colab import files\nfrom keras.preprocessing import image\nfrom PIL import Image\nimport cv2\nimport warnings\nwarnings.filterwarnings('ignore')\n\n(train_images,train_labels),(test_images,test_labels) = keras.datasets.mnist.load_data()\n\ntrain_images = train_images.reshape(len(train_images),28,28,1) # (60,000,784)\ntest_images = test_images.reshape(len(test_images),28,28,1)\n\nmodel = keras.models.Sequential([\n keras.layers.Conv2D(64,(3,3),activation='relu',input_shape=(28,28,1)),\n keras.layers.MaxPool2D(2,2),\n keras.layers.Flatten(),\n keras.layers.Dense(28,activation='relu'),\n keras.layers.Dense(10,activation='softmax')\n ])\n\nprint(model.summary())\n\nmodel.compile(optimizer='adam',metrics=['acc'],loss='sparse_categorical_crossentropy')\n\nmodel.fit(train_images,train_labels,epochs=10,batch_size=32, validation_split=0.1)\n\nmodel.evaluate(test_images,test_labels)\n\ndef predict_img(path):\n img = image.load_img(path)\n x = image.img_to_array(img)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY) #converting from rbg image to grayscale\n plt.imshow(x,cmap='gray')\n plt.show()\n x = cv2.resize(np.array(x), (28, 28)) #resizing it to 28x28\n x = x.reshape(28,28,1) #Reshaping it to fit in our model\n x = np.expand_dims(x, axis=0)\n class_label = model.predict(x) #predicting\n print('Predicted Value is:',np.where(class_label[0]==max(class_label[0]))[0])\n\npredict_img(r'C:\\\\Users\\\\Yash\\\\Desktop\\\\KTH\\\\Sem1-p2-AI\\\\ProjectNn\\\\Digits\\\\2.png')\n#\n","sub_path":"SomeWhatWorks.py","file_name":"SomeWhatWorks.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"581704908","text":"\n\n#calss header\nclass _HIVE():\n\tdef __init__(self,): \n\t\tself.name = \"HIVE\"\n\t\tself.definitions = [u'a structure where bees live, especially a beehive (= container like a box) or the group of bees living there', u\"a condition in which a person's skin develops red raised areas: \"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_hive.py","file_name":"_hive.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"68538609","text":"# /usr/bin/python\n# -*- coding:utf-8 -*-\nfrom script.assert_util import AssertUtil\nimport json\n\nclass BaseBusiness(object):\n def __init__(self):\n pass\n\n def init_params(self, local_vars_copy):\n '''\n 请求参数,自动赋值\n :param local_vars_copy:\n :return: 处理后的请求参数\n '''\n if 'self' in local_vars_copy:\n local_vars_copy.pop('self')\n\n param_dict = local_vars_copy.copy()\n header_dict = {}\n file_dict = {}\n for key in local_vars_copy:\n if local_vars_copy[key] is None:\n param_dict.pop(key)\n continue\n if key.__contains__(\"__py_debug_temp\"):\n param_dict.pop(key)\n if key.startswith('header_'):\n param_dict.pop(key)\n header_key = key.split('_')[1]\n header_dict[header_key] = local_vars_copy[key]\n elif 
key.startswith('file_'):\n                param_dict.pop(key)\n                file_key = key.split('_')[1]\n                file_dict[file_key] = local_vars_copy[key]\n            elif key.startswith('param_'):\n                param_dict.pop(key)\n                param_key = key.split('_')[1]\n                param_dict[param_key] = local_vars_copy[key]\n\n        return (header_dict, param_dict, file_dict,)\n\n    def init_response(self, local_vars_copy, instance, func_name, if_assert=1,\n                      assert_str='{\"code\":0, \"message\":\"成功\"}', msg=''):\n        '''\n        Auto-populate the request parameters, return the API response, and optionally assert on it.\n        :param local_vars_copy: the parameters for the API call\n        :param instance: the instance that owns the API method\n        :param func_name: the name of the API method\n        :param if_assert: whether to run the assertion\n        :param assert_str: the expected values for the assertion (a dict)\n        :param msg: the message to output when the assertion fails\n        :return: the API response\n        '''\n        header_dict, param_dict, file_dict = self.init_params(local_vars_copy)\n        try:\n            param_dict.pop(\"if_assert\")\n            param_dict.pop(\"assert_str\")\n            param_dict.pop(\"msg\")\n        except KeyError:\n            pass\n        response = getattr(instance, func_name)(param_dict=param_dict, header_dict=header_dict)\n        if if_assert:\n            if isinstance(assert_str, str):\n                assert_str = json.loads(assert_str)\n            AssertUtil.assert_key_value_in_list(assert_str, response, msg=msg+str(response))\n        return response\n","sub_path":"mobike-api-test/lib/business/base_business.py","file_name":"base_business.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"368441872","text":"import os\nimport logging\nimport bot\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n    if 'SLACK_BOT_TOKEN' not in os.environ:\n        logger.debug('SLACK_BOT_TOKEN not found in environment')\n        raise EnvironmentError('SLACK_BOT_TOKEN not found in environment')\n\n    slackbot_token = os.environ['SLACK_BOT_TOKEN']\n    bot_instance = bot.Bot(slackbot_token)\n    bot_instance.connect_and_listen()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"slackbot/prathamam/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"285991877","text":"# Discord.py is smoooooooooooooosh!!!!!\nimport discord\nfrom discord.ext import tasks, commands\nimport asyncio\n\nimport os # for loading .env settings\nimport json\n\nclass Thread(commands.Cog):\n    def __init__(self, airlinia):\n        self.bot = airlinia # receive the bot instance\n\n    @commands.Cog.listener()\n    async def on_reaction_add(self, reaction, user):\n        if reaction.message.channel.category_id == 668142017175617546 or reaction.message.channel.category_id == 668374572080562177:\n            if reaction.emoji.id == 665462194116493313:\n                members = [reaction.message.author, user]\n                channel = await self._channel_create(reaction.message.channel.category, members, 'Thread')\n                embed_1 = discord.Embed(title='チャンネル作成しました。',\n                                        description=f'{channel.mention}\\rスレッドを作成しました。',\n                                        color=0x0080ff)\n                await reaction.message.channel.send(embed=embed_1, content=f'{user.mention}、{reaction.message.author.mention}')\n\n                embed_2 = discord.Embed(description=f'{reaction.message.content}',\n                                        color=0x0080ff)\n                embed_2.set_footer(text='国際空創国家連合', icon_url='https://cdn.discordapp.com/attachments/658699920039215114/670817582034714635/b16b12b993469c42.gif')\n                embed_2.set_author(name=user.display_name, icon_url=user.avatar_url)\n                embed_2.set_thumbnail(url=reaction.message.author.avatar_url)\n                if len(reaction.message.attachments) > 0:\n                    embed_2.set_image(url=reaction.message.attachments[0].url)\n                await channel.send(embed=embed_2, 
content=f'{user.mention}、{reaction.message.author.mention}')\n\n    async def _channel_create(self, category, members, name):\n        overwrites = {\n            self.bot.user:\n                discord.PermissionOverwrite.from_pair(discord.Permissions.all(), discord.Permissions.none()),\n            category.guild.default_role:\n                discord.PermissionOverwrite.from_pair(discord.Permissions.none(), discord.Permissions.all()),\n            members[0]:\n                discord.PermissionOverwrite.from_pair(discord.Permissions(66448721), discord.Permissions.none()),\n            members[1]:\n                discord.PermissionOverwrite.from_pair(discord.Permissions(66448721), discord.Permissions.none()),\n            category.guild.get_role(655254335030034442): # role that is allowed to view the channel\n                discord.PermissionOverwrite.from_pair(\n                    discord.Permissions(37080128), discord.Permissions(2 ** 53 - 37080129)),\n        }\n        channel = await category.create_text_channel(name, overwrites=overwrites)\n        return channel\n\ndef setup(airlinia):\n    airlinia.add_cog(Thread(airlinia))\n","sub_path":"airlinia_cogs/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"111487542","text":"'''\nGiven a sorted list, remove the duplicates in place, return the modified list and the number of unique values.\n\nExample:\nA = [1,1,2] should return length = 2, and A is now [1,2].\n'''\n\n'''\nNOTES\n1. Since the list is sorted, you only need to remember the last value seen\n2. It can start from either the front or the back of the list\n3. Keep track of #items, #unique items\n4. Pop #items - #unique items times!\n'''\n\n# Solution 1: Inefficient with space\n# Start from the end\ndef RemoveDuplicates(input):\n    length = len(input)-1\n    distinctCount = length+1 # default length\n    seen = None # last value seen\n    while(length >= 0):\n        # First time!\n        if (seen is None):\n            seen = input[length]\n        # Distinct\n        elif(input[length] < seen):\n            seen = input[length]\n        # Duplicate\n        else:\n            input.pop(length) # Inefficient, since there can be duplicates in the middle\n            distinctCount -= 1\n        length -= 1\n    return(input,distinctCount)\n\n# Solution 2:\n# Iterate the list once, moving all duplicated values to the end (up to a certain index),\n# then pop #items - #unique items times!\n\ninput = [1,2,3,3,4,5,7]\nprint(\"input >>>\", input)\nresult, count = RemoveDuplicates(input)\nprint(\"modified list without duplicates >>>\", result)\nprint(\"#distinct values=\", count)\n","sub_path":"sorted-list-remove-duplicates.py","file_name":"sorted-list-remove-duplicates.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"508303709","text":"import os\nimport sys\nimport time\nfrom datetime import datetime\nimport socket\nimport irc_bot\nimport configparser\n\nfrom pysrt import SubRipFile\nfrom pysrt import SubRipItem\nfrom pysrt import SubRipTime\n\ndef iso8601_utc_now():\n\treturn datetime.utcnow().isoformat(sep='T') + \"Z\"\n\ndef make_offset_str(offset_hours):\n\toffset_hours = int(offset_hours)\n\tif offset_hours == 0:\n\t\treturn \"Z\"\n\tif offset_hours > 0:\n\t\tsign = \"+\"\n\telse:\n\t\tsign = \"-\"\n\toffset_str = str(abs(offset_hours))\n\tif len(offset_str) < 2:\n\t\toffset_str = \"0\" + offset_str\n\treturn sign + offset_str + \":00\"\n\ndef iso8601_local_now():\n\treturn datetime.now().isoformat(sep='T') + make_offset_str(utc_offset_hours)\n\ndef parse_chat_server(chat_server):\n\treturn chat_server.replace(' ', '').split(':')\n\ndef ensure_dir(dir_path):\n    if not os.path.exists(dir_path):\n        print(\"creating directory \" + 
dir_path)\n os.makedirs(dir_path)\n\ndef log_add(path, content):\n\twith open(path, mode='a', encoding='utf-8') as log_file:\n\t\tlog_file.write(content)\n\ndef safe_print(content):\n\ttry:\n\t\tprint(content)\n\texcept UnicodeEncodeError:\n\t\tprint(content.encode('utf-8'))\n\ndef get_timestamp(ts_format):\n\tif ts_format == 0:\n\t\treturn str(time.time())[:15]\n\telif ts_format == 2:\n\t\treturn iso8601_local_now()\n\telse:\n\t\treturn iso8601_utc_now()\n\nif(len(sys.argv) != 3):\n print(__file__ + ' channel server_type')\n sys.exit(0)\n\ncurrent_directory = os.path.dirname(os.path.abspath(__file__))\nconfig_path = current_directory + \"/config.txt\"\nif os.path.isfile(config_path):\n\tconfig = configparser.ConfigParser()\n\tconfig.read(config_path)\n\tusername = config.get('Settings', 'username').replace(' ', '').lower()\n\toauth = config.get('Settings', 'oauth')\n\trecord_raw = config.getboolean('Settings', 'record_raw')\n\ttimestamp_format = config.getint('Settings', 'timestamp_format')\n\ttwitchclient_version = config.getint('Settings', 'twitchclient_version')\n\tregular_chat_server = config.get('Settings', 'regular_chat_server')\n\tgroup_chat_server = config.get('Settings', 'group_chat_server')\n\tevent_chat_server = config.get('Settings', 'event_chat_server')\nelse:\n\tprint(\"config.txt not found\", file=sys.stderr)\n\tsys.exit(0)\n\nts = time.time()\nutc_offset_hours = int(int((datetime.fromtimestamp(ts) - datetime.utcfromtimestamp(ts)).total_seconds()) / 3600)\n\nserver_dict = {'r':parse_chat_server(regular_chat_server), 'g':parse_chat_server(group_chat_server), 'e':parse_chat_server(event_chat_server)}\nchat_channel = sys.argv[1]\nchat_server = server_dict[sys.argv[2].lower()]\n\nensure_dir(current_directory + '/comment_log')\nif record_raw:\n\tensure_dir(current_directory + '/comment_log_raw')\n\nraw_log_path = current_directory + '/comment_log_raw/' + chat_channel + '.txt'\nlog_path = current_directory + '/comment_log/' + chat_channel + '.txt'\n\nsrt_log_path = current_directory + '/comment_log/' + chat_channel + '.srt'\n\nbot = irc_bot.irc_bot(username, oauth, chat_channel, chat_server[0], chat_server[1], twitchclient_version = twitchclient_version)\n\noutsrt = SubRipFile()\n\ntext = ''\n\nwhile 1:\n\traw_msg_list = bot.get_message()\n\tif len(raw_msg_list) > 0:\n\t\tif len(text) > 0:\n\t\t\tend = SubRipTime.from_time(datetime.now())\n\t\t\titem = SubRipItem(0, start, end, text)\n\t\t\toutsrt.append(item)\n\t\tstart = SubRipTime.from_time(datetime.now())\n\t\ttext = ''\n\t\ttimestamp = get_timestamp(timestamp_format)\n\t\tfor item in raw_msg_list:\n\t\t\tif record_raw:\n\t\t\t\tlog_add(raw_log_path, timestamp + ' ' + item + '\\n')\n\t\t\tusername, message = irc_bot.parse_user(item)\n\t\t\tif username != '':\n\t\t\t\tsafe_print(chat_channel + \" \" + username + \": \" + message)\n\t\t\t\tlog_add(log_path, timestamp + ' ' + username + ': ' + message + '\\n')\n\t\t\t\ttext += username + \": \" + message + '\\n'\n\t\t\t\toutsrt.clean_indexes()\n\t\t\t\toutsrt.save(srt_log_path, encoding='utf-8')\n\n\n","sub_path":"comment_logger_srt.py","file_name":"comment_logger_srt.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"161190043","text":"# Developer: marcioz98\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\n\ndef main(url):\n\tbrowser = webdriver.PhantomJS()\n\tbrowser.get(url)\n\n\tsoup = BeautifulSoup(browser.page_source, 
\"html.parser\")\n\n\tbrowser.quit()\n\n\tfilms = soup.find_all('div', {'class' : 'FilmItem film'})\n\n\toutput = \"\"\n\n\tfor film in films:\n\t\tfilm_block = film.prettify()\n\t\tsoup = BeautifulSoup(film_block, \"html.parser\")\n\t\ttitles = soup.find_all('h5')\n\t\thours = soup.find_all('span', {'class' : 'mid'})\n\t\tfor title in titles:\n\t\t\toutput += title.text\n\t\tfor hour in hours:\n\t\t\toutput += hour.text\n\n\treturn output\n\n\n","sub_path":"tpscraper.py","file_name":"tpscraper.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"643509388","text":"MUJOCO_ENVS = ['ant', 'hopper', 'halfcheetah', 'humanoid', 'pusher', 'reacher', 'striker', 'swimmer', 'thrower', 'walker']\n\nCHECKPOINT_DICT = {\n 'enduro': (3100, 3650, 4450, 50),\n 'montezumarevenge': (0, 0, 0, 0),\n 'seaquest': (10, 65, 70, 5),\n #'hero': (300, 1500, 2400, 50),\n 'other': (50, 600, 1450, 50)\n}\n\ndef get_env_id_type(env_name):\n env_type = \"atari\"\n\n if env_name == \"spaceinvaders\":\n env_id = \"SpaceInvadersNoFrameskip-v4\"\n elif env_name == \"mspacman\":\n env_id = \"MsPacmanNoFrameskip-v4\"\n elif env_name == \"montezumarevenge\":\n env_id = \"MontezumaRevengeNoFrameskip-v4\"\n elif env_name == \"videopinball\":\n env_id = \"VideoPinballNoFrameskip-v4\"\n elif env_name == \"beamrider\":\n env_id = \"BeamRiderNoFrameskip-v4\"\n elif env_name == \"halfcheetah\":\n env_id = \"HalfCheetah-v2\"\n env_type = 'mujoco'\n elif env_name in MUJOCO_ENVS:\n env_id = env_name[0].upper() + env_name[1:] + \"-v2\"\n env_type = 'mujoco'\n else:\n env_id = env_name[0].upper() + env_name[1:] + \"NoFrameskip-v4\"\n\n return env_id, env_type\n\n\ndef get_checkpoint_range(env_name, demo=True):\n if demo:\n _min, _max, _step = get_checkpoints_demos(env_name)\n else:\n _min, _max, _step = get_checkpoints_extrapolate(env_name)\n\n return range(_min, _max + _step, _step)\n\n\ndef get_checkpoints_demos(env_name):\n _min, _max, _, _step = CHECKPOINT_DICT['other']\n for key in CHECKPOINT_DICT.keys():\n if env_name in key:\n _min, _max, _, _step = CHECKPOINT_DICT[key]\n break\n\n return _min, _max, _step\n\n\ndef get_checkpoints_extrapolate(env_name):\n _, _min, _max, _step = CHECKPOINT_DICT['other']\n for key in CHECKPOINT_DICT.keys():\n if env_name in key:\n _, _min, _max, _step = CHECKPOINT_DICT[key]\n break\n\n return _min, _max, _step\n","sub_path":"atari/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"229770855","text":"from options.train_options import TrainOptions\nfrom data.dataloader import MSDSurfTrainDataset\n\nopts = TrainOptions().parse()\ndataset = MSDSurfTrainDataset(opts)\nimport torch\nimport numpy as np\n\n\ndef collate_fn(batch):\n \"\"\"Creates mini-batch tensors\n We should build custom collate_fn rather than using default collate_fn\n \"\"\"\n meta = {}\n keys = batch[0].keys()\n for key in keys:\n meta.update({key: np.array([d[key] for d in batch])})\n return meta\n\n\ndataloader = torch.utils.data.DataLoader(dataset,\n batch_size=2,\n shuffle=False,\n num_workers=2,\n collate_fn=collate_fn)\nfor i, data in enumerate(dataloader):\n if i == 0:\n break\nprint(data['img_patch'].shape)\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"18702154","text":"from __future__ import absolute_import\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.offsetbox import OffsetImage, AnnotationBbox\nfrom matplotlib.patches import Polygon, Ellipse\n\n#TODO: add a simple swarmplot implementation\n__all__ = [\n 'gradient_line', 'irregular_contour',\n 'voronoi_filled', 'pca_ellipse', 'embedded_images', 'jitterplot'\n]\n\n\ndef gradient_line(xs, ys, colormap_name='jet', ax=None):\n '''Plot a 2-d line with a gradient representing ordering.\n See http://stackoverflow.com/q/8500700/10601 for details.'''\n if ax is None:\n ax = plt.gca()\n cm = plt.get_cmap(colormap_name)\n npts = len(xs)-1\n colors = cm(np.linspace(0, 1, num=npts))\n if hasattr(ax, 'set_prop_cycle'):\n ax.set_prop_cycle(color=colors)\n else:\n ax.set_color_cycle(colors)\n for i in range(npts):\n ax.plot(xs[i:i+2],ys[i:i+2])\n return plt.show\n\n\ndef irregular_contour(x, y, z, func=plt.contourf, func_kwargs=dict(),\n grid_size=(100,100), padding_fraction=0.05,\n interp_method='nearest'):\n '''Handles interpolating irregular data to a grid,\n and plots it using the given func [default: contourf]\n See http://wiki.scipy.org/Cookbook/Matplotlib/Gridding_irregularly_spaced_data\n '''\n from scipy.interpolate import griddata # Late import; scipy is optional\n x, y, z = map(np.asanyarray, (x, y, z))\n x_range = (x.min(), x.max())\n y_range = (y.min(), y.max())\n pad_x = padding_fraction * -np.subtract.reduce(x_range)\n pad_y = padding_fraction * -np.subtract.reduce(y_range)\n grid_x = np.linspace(x_range[0] - pad_x, x_range[1] + pad_x, grid_size[0])\n grid_y = np.linspace(y_range[0] - pad_y, y_range[1] + pad_y, grid_size[1])\n grid_z = griddata((x, y), z, (grid_x[None], grid_y[:,None]),\n method=interp_method)\n return func(grid_x, grid_y, grid_z, **func_kwargs)\n\n\ndef voronoi_filled(points_or_voronoi, colors, show_points=False,\n padding_fraction=0.05, cmap=None, ax=None, alpha=None,\n edgecolor=None):\n '''Plots a filled voronoi diagram, using the given points and their colors.\n The first parameter must be an array-like or a scipy.stats.Voronoi object.\n '''\n from scipy.spatial import Voronoi # Late import; scipy is optional\n\n # Disambiguate the first parameter\n if isinstance(points_or_voronoi, Voronoi):\n vor = points_or_voronoi\n else:\n points = np.asanyarray(points_or_voronoi)\n assert points.shape[1] == 2, 'Input points must be 2D'\n vor = Voronoi(points)\n\n # Borrowed from http://nbviewer.ipython.org/gist/pv/8037100\n regions = []\n vertices = vor.vertices.tolist()\n\n center = vor.points.mean(axis=0)\n radius = vor.points.ptp().max()*2\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n verts = vor.regions[region]\n if all(v >= 0 for v in verts):\n # finite region\n regions.append(verts)\n continue\n\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in verts if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n 
midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign((midpoint - center).dot(n)) * n\n far_point = vor.vertices[v2] + direction * radius\n\n new_region.append(len(vertices))\n vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([vertices[v] for v in new_region])\n vs -= vs.mean(axis=0)\n angle_order = np.argsort(np.arctan2(vs[:,1], vs[:,0]))\n new_region = np.array(new_region)[angle_order]\n\n # finish\n regions.append(new_region)\n vertices = np.asarray(vertices)\n\n # Plot colored polygons\n if ax is None:\n ax = plt.gca()\n polys = PatchCollection([Polygon(vertices[region]) for region in regions],\n cmap=cmap, alpha=alpha, edgecolor=edgecolor)\n polys.set_array(np.asanyarray(colors))\n ax.add_collection(polys)\n\n if show_points:\n ax.plot(vor.points[:,0], vor.points[:,1], 'ko')\n\n # Zoom to a reasonable scale.\n pad = padding_fraction * (vor.max_bound - vor.min_bound)\n mins = vor.min_bound - pad\n maxes = vor.max_bound + pad\n ax.set_xlim(mins[0], maxes[0])\n ax.set_ylim(mins[1], maxes[1])\n return polys\n\n\ndef pca_ellipse(data, loc=None, ax=None, **ellipse_kwargs):\n '''Finds the 2d PCA ellipse of given data and plots it.\n loc: center of the ellipse [default: mean of the data]\n '''\n from sklearn.decomposition import PCA # Late import; sklearn is optional\n pca = PCA(n_components=2).fit(data)\n if loc is None:\n loc = pca.mean_\n if ax is None:\n ax = plt.gca()\n cov = pca.explained_variance_ * pca.components_.T\n u,s,v = np.linalg.svd(cov)\n width,height = 2*np.sqrt(s[:2])\n angle = np.rad2deg(np.arctan2(u[1,0], u[0,0]))\n ell = Ellipse(xy=loc, width=width, height=height, angle=angle,\n **ellipse_kwargs)\n ax.add_patch(ell)\n return ell\n\n\ndef embedded_images(X, images, exclusion_radius=None, ax=None, cmap=None,\n zoom=1, seed=None, frameon=False):\n '''Plots a subset of images on an axis. Useful for visualizing image\n embeddings, especially when plotted over a scatterplot. 
Selects random points\n to annotate with their corresponding image, respecting an exclusion_radius\n around each selected point.'''\n assert X.shape[0] == images.shape[0], 'Unequal number of points and images'\n assert X.shape[1] == 2, 'X must be 2d'\n if ax is None:\n ax = plt.gca()\n if exclusion_radius is None:\n # TODO: make a smarter default based on image size and axis limits\n exclusion_radius = 1.\n if seed is not None:\n np.random.seed(seed)\n while X.shape[0] > 0:\n i = np.random.choice(X.shape[0])\n im = OffsetImage(images[i], zoom=zoom, cmap=cmap)\n ab = AnnotationBbox(im, X[i], xycoords='data', frameon=frameon)\n ax.add_artist(ab)\n dist = np.sqrt(np.square(X[i] - X).sum(axis=1))\n mask = (dist > exclusion_radius).ravel()\n X = X[mask]\n images = images[mask]\n return plt.show\n\n\ndef jitterplot(data, positions=None, ax=None, vert=True, scale=0.1,\n **scatter_kwargs):\n '''Plots jittered points as a distribution visualizer.\n\n Scatter plot arguments default to: marker='.', c='k', alpha=0.75\n Also known as a stripplot.\n See also: boxplot, violinplot, beeswarm\n '''\n if ax is None:\n ax = plt.gca()\n if positions is None:\n positions = range(len(data))\n\n kwargs = dict(marker='.', c='k', alpha=0.75)\n kwargs.update(scatter_kwargs)\n\n for pos, y in zip(positions, data):\n if scale > 0:\n x = np.random.normal(loc=pos, scale=scale, size=len(y))\n else:\n x = np.zeros_like(y) + pos\n if not vert:\n x, y = y, x\n ax.scatter(x, y, **kwargs)\n return plt.show\n","sub_path":"viztricks/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":7293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"31308504","text":"import sys\nsys.stdin = open('17298_오큰수.txt', 'rt')\n\n\nN = int(input())\nA = list(map(int, input().split()))\nstack = []\nNGE = [-1]*N\nfor i in range(N):\n while stack and A[stack[-1]] < A[i]:\n NGE[stack.pop()] = A[i]\n stack.append(i)\nprint(*NGE)","sub_path":"BaekJoon/단계별로 풀어보기/스택/17298_오큰수.py","file_name":"17298_오큰수.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"55266860","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('adm', '0109_auto_20161031_1053'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='ot_linea',\n name='codigo',\n field=models.CharField(default=0, max_length=14, verbose_name=b'Codigo', validators=[django.core.validators.RegexValidator(b'^\\\\d{14}$')]),\n preserve_default=False,\n ),\n ]\n","sub_path":"adm/migrations/0110_ot_linea_codigo.py","file_name":"0110_ot_linea_codigo.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"647852944","text":"#!/usr/bin/env python\n\n__all__ = ['letv_download']\n\nfrom common import *\nimport re, base64, json\n\n#http://www.letv.com/ptv/pplay/5938/1.html\n\ndef get_title(s1):\n #get title\n p1 = r'<\\s*title\\s*>\\s*(.*?)\\s*<\\s*/\\s*title\\s*>'\n o1 = re.search(p1,s1,re.I|re.S)\n assert(o1)\n title = o1.group(1)\n suffix = ' - \\xe5\\x9c\\xa8\\xe7\\xba\\xbf\\xe8\\xa7\\x82\\xe7\\x9c\\x8b - \\xe4\\xb9\\x90\\xe8\\xa7\\x86\\xe7\\xbd\\x91'\n pos = o1.group(1).rfind(suffix)\n if -1 != pos:\n title = title[:pos]\n return title.decode('utf-8') #return unicode\n \ndef 
letv_download(url):\n s1 = get_html(url)\n p1 = r''']*?id=.?j-videoplay.*?\"\"\"\n\n\nclass CustomerField(Select):\n template_name = 'jobs/customer_select.html'\n\n\nclass CustomSelect(Select):\n template_name = 'jobs/bootstrap_select.html'\n\n\nclass JobCreateForm2(forms.ModelForm):\n class Meta:\n model = Job\n fields = '__all__'\n\nclass JobCreateForm(forms.ModelForm):\n TIMELIST = (\n (1, \"0:30\"),\n (2, \"1:00\"),\n (3, \"1:30\"),\n (4, \"2:00\"),\n (5, \"2:30\"),\n (6, \"3:00\"),\n (7, \"3:30\"),\n (8, \"4:00\"),\n (9, \"4:30\"),\n (10, \"5:00\"),\n (11, \"5:30\"),\n (12, \"6:00\"),\n (13, \"6:30\"),\n (14, \"7:00\"),\n (15, \"7:30\"),\n (16, \"8:00\"),\n )\n year = forms.CharField(required=False, label=\"Vehicle Year\", widget=forms.Select(attrs={'id': 'ajYears', 'class': 'form-control'}))\n make = forms.CharField(required=False, label=\"Vehicle Make\",\n widget=forms.Select(attrs={'id': 'ajMakes', 'disabled': 'disabled', 'class': 'form-control'}))\n model = forms.CharField(required=False, label=\"Vehicle Model\",\n widget=forms.Select(attrs={'id': 'ajModels', 'disabled': 'disabled', 'class': 'form-control'}))\n style = forms.CharField(required=False, label=\"Vehicle Style\",\n widget=forms.Select(attrs={'id': 'ajStyles', 'disabled': 'disabled', 'class': 'form-control'}))\n duration = forms.ChoiceField(required=True, label=\"Job Duration\", choices=TIMELIST, widget=forms.Select(attrs={'class': 'form-control'}))\n\n\n class Meta:\n model = Job\n fields = [\n 'customer',\n 'year',\n 'make',\n 'model',\n 'style',\n 'type',\n 'description',\n 'address',\n 'city',\n 'state',\n 'zip',\n 'duration',\n ]\n exclude = ['salesperson']\n widgets = {\n 'type': forms.Select(attrs={'class': 'form-control'}),\n 'description': forms.Textarea({'class': 'form-control'}),\n 'customer': CustomerField(attrs={'data-toggle': 'tooltip',\n 'title': mark_safe('Select Customer'),\n 'class': 'form-control'}),\n 'address': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Address'}),\n 'city': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'City'}),\n 'state': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'State'}),\n 'zip': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Zip Code'}),\n }\n\n def save(self, commit=True):\n event = Event.objects.create(duration=self.cleaned_data.get('duration'), title=self.cleaned_data.get('customer'))\n job = super(JobCreateForm, self).save(commit=False)\n job.event = event\n if commit:\n job.save()\n return job\n\n\nclass JobUpdateForm(forms.ModelForm):\n TIMELIST = (\n (1, \"0:30\"),\n (2, \"1:00\"),\n (3, \"1:30\"),\n (4, \"2:00\"),\n (5, \"2:30\"),\n (6, \"3:00\"),\n (7, \"3:30\"),\n (8, \"4:00\"),\n (9, \"4:30\"),\n (10, \"5:00\"),\n (11, \"5:30\"),\n (12, \"6:00\"),\n (13, \"6:30\"),\n (14, \"7:00\"),\n (15, \"7:30\"),\n (16, \"8:00\"),\n )\n year = forms.CharField(required=False, label=\"Vehicle Year\",\n widget=forms.Select(attrs={'id': 'ajYears', 'class': 'form-control'}))\n make = forms.CharField(required=False, label=\"Vehicle Make\",\n widget=forms.Select(\n attrs={'id': 'ajMakes', 'disabled': 'disabled', 'class': 'form-control'}))\n model = forms.CharField(required=False, label=\"Vehicle Model\",\n widget=forms.Select(\n attrs={'id': 'ajModels', 'disabled': 'disabled', 'class': 'form-control'}))\n style = forms.CharField(required=False, label=\"Vehicle Style\",\n widget=forms.Select(\n attrs={'id': 'ajStyles', 'disabled': 'disabled', 'class': 'form-control'}))\n\n class Meta:\n model = Job\n fields = 
[\n            'customer',\n            'year',\n            'make',\n            'model',\n            'style',\n            'type',\n            'description',\n            'address',\n            'city',\n            'state',\n            'zip',\n        ]\n        exclude = ['salesperson']\n        widgets = {\n            'type': forms.Select(attrs={'class': 'form-control'}),\n            'description': forms.Textarea({'class': 'form-control'}),\n            'customer': CustomerField(attrs={'data-toggle': 'tooltip',\n                                             'title': mark_safe('Select Customer'),\n                                             'class': 'form-control'}),\n            'address': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Address'}),\n            'city': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'City'}),\n            'state': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'State'}),\n            'zip': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Zip Code'}),\n        }\n\n    def save(self, commit=True):\n        job = super(JobUpdateForm, self).save(commit=False)\n        job.event.title = job.full_name_text\n        if commit:\n            job.save()\n        return job\n\n\nclass AddDollarSign(forms.NumberInput):\n    template_name = 'jobs/dollar.html'\n\n\nclass JobCloseForm(forms.ModelForm):\n    class Media:\n        css = {\n            'all': (\n                'eonasdan-bootstrap-datetimepicker/build/css/bootstrap-datetimepicker.css',\n                'bootstrap/dist/css/bootstrap.css',\n            )\n        }\n        js = (\n            'moment/min/moment.min.js',\n            'eonasdan-bootstrap-datetimepicker/build/js/bootstrap-datetimepicker.min.js',\n            'footable/compiled/footable.js'\n        )\n\n    class Meta:\n        model = Job\n        fields = [\n            'completion_time',\n            'labor_price',\n        ]\n        widgets = {\n            'completion_time': DateTimePicker(\n                options={\n                    \"format\": \"YYYY-MM-DD hh:mm a\",\n                    \"stepping\": 15,\n                    \"allowInputToggle\": True,\n                },\n            ),\n        }\n    completion_time = forms.DateField(('%Y-%m-%d %I:%M %p',), required=True, widget=DateTimePicker(\n        options={\n            \"format\": \"YYYY-MM-DD hh:mm A\",\n            \"stepping\": 15,\n            \"allowInputToggle\": True,\n        },\n    ))\n    labor_price = forms.DecimalField(decimal_places=2, required=True, widget=AddDollarSign(attrs={'step': 0.01, 'class': 'form-control'}))\n","sub_path":"jobs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"46201672","text":"# Dominion game constants.\n\nCORE_CARDS = ['Estate', 'Duchy', 'Province', 'Colony',\n              'Copper', 'Silver', 'Gold', 'Platinum',\n              'Potion', 'Curse', 'Ruins']\n\n# this stores all cards with more than 10 cards in the supply\n# doesn't hold victory cards, those are handled in another case\nSPECIAL_SUPPLY_COUNTS = {\n    'Copper': 60,\n    'Silver': 40,\n    'Gold': 30,\n    'Platinum': 12,\n    'Potion': 12,\n    'Spoils': 15,\n    'Rats': 20,\n}\n\n# NOTE: Spoils can't be handled unambiguously\nNON_SUPPLY = {'Diadem': 'Tournament', 'Followers': 'Tournament',\n              'Trusty Steed': 'Tournament', 'Princess': 'Tournament',\n              'Bag of Gold': 'Tournament', 'Madman': 'Hermit',\n              'Mercenary': 'Urchin'}\n\nVP_CARDS = ['Estate', 'Duchy', 'Province', 'Colony', 'Gardens', 'Silk Road',\n            'Vineyard', 'Fairgrounds', 'Duke', 'Feodum', 'Great Hall',\n            'Nobles', 'Tunnel', 'Island']\n\nRUINSES = ['Ruined Library', 'Ruined Village', 'Survivors', 'Abandoned Mine',\n           'Ruined Market']\n\nKNIGHTS = ['Dame Anna', 'Dame Josephine', 'Dame Molly', 'Dame Natalie',\n           'Dame Sylvia', 'Sir Bailey', 'Sir Destry', 'Sir Vander',\n           'Sir Michael', 'Sir Martin']\n\nSHELTERS = ['Hovel', 'Overgrown Estate', 'Necropolis']\n\nLOOTERS = ['Marauder', 'Death Cart', 'Cultist']\n\nSPOILS_GIVERS = ['Marauder', 'Bandit Camp', 'Pillage']\n\nPRIZES = ['Diadem', 'Followers', 'Trusty Steed', 'Princess', 'Bag of 
Gold']\n\nBOT_NAMES = ['Banker Bot', 'Conqueror Bot', 'Defender Bot', 'Lord Bottington',\n 'Serf Bot', 'Villager Bot', 'Warlord Bot', 'Village Idiot Bot']\nbot_copies = []\nfor bot in BOT_NAMES:\n for nth in ['I', 'II', 'III', 'IV', 'V', 'VI']:\n bot_copies.append('%s %s' % (bot, nth))\nBOT_NAMES += bot_copies\n\n# A giant card type dictionary\n# Wheeeeeee\n# (This is copy pasted from the Javascript for the log prettifier,\n# thank you for your busywork sacrifice)\nCARDNAME_TO_TYPE = {\n 'Border Village':'action',\n 'Farming Village':'action',\n 'Mining Village':'action',\n 'Native Village':'action',\n 'Walled Village':'action',\n 'Worker\\'s Village':'action',\n 'Ruined Village':'action-ruins',\n 'Fishing Village':'action-duration',\n 'Village':'action',\n 'Ruined Library':'action-ruins',\n 'Library':'action',\n 'Abandoned Mine':'action-ruins',\n 'Mine':'action',\n 'Bag of Gold':'action',\n 'Fool\\'s Gold':'treasure-reaction',\n 'Gold':'treasure',\n 'Overgrown Estate':'shelter-victory',\n 'Estate':'victory',\n 'Counting House':'action',\n 'Count':'action',\n 'Coppersmith':'action',\n 'Copper':'treasure',\n 'Ruined Market':'action-ruins',\n 'Grand Market':'action',\n 'Black Market':'action',\n 'Market Square':'action-reaction',\n 'Market':'action',\n 'Adventurer':'action',\n 'Alchemist':'action',\n 'Altar':'action',\n 'Ambassador':'action',\n 'Apothecary':'action',\n 'Apprentice':'action',\n 'Armory':'action',\n 'Band of Misfits':'action',\n 'Bandit Camp':'action',\n 'Baron':'action',\n 'Bazaar':'action',\n 'Bishop':'action',\n 'Bridge':'action',\n 'Bureaucrat':'action',\n 'Cartographer':'action',\n 'Catacombs':'action',\n 'Cellar':'action',\n 'Chancellor':'action',\n 'Chapel':'action',\n 'City':'action',\n 'Conspirator':'action',\n 'Council Room':'action',\n 'Courtyard':'action',\n 'Crossroads':'action',\n 'Cultist':'action',\n 'Cutpurse':'action',\n 'Dame Anna':'action',\n 'Dame Molly':'action',\n 'Dame Natalie':'action',\n 'Dame Sylvia':'action',\n 'Death Cart':'action',\n 'Develop':'action',\n 'Duchess':'action',\n 'Embargo':'action',\n 'Embassy':'action',\n 'Envoy':'action',\n 'Expand':'action',\n 'Explorer':'action',\n 'Familiar':'action',\n 'Feast':'action',\n 'Festival':'action',\n 'Followers':'action',\n 'Forager':'action',\n 'Forge':'action',\n 'Fortress':'action',\n 'Fortune Teller':'action',\n 'Ghost Ship':'action',\n 'Golem':'action',\n 'Goons':'action',\n 'Governor':'action',\n 'Graverobber':'action',\n 'Haggler':'action',\n 'Hamlet':'action',\n 'Harvest':'action',\n 'Herbalist':'action',\n 'Hermit':'action',\n 'Highway':'action',\n 'Hunting Grounds':'action',\n 'Hunting Party':'action',\n 'Inn':'action',\n 'Ironmonger':'action',\n 'Ironworks':'action',\n 'JackOfAllTrades':'action',\n 'Jester':'action',\n 'Junk Dealer':'action',\n 'King\\'s Court':'action',\n 'Knights':'action',\n 'Laboratory':'action',\n 'Lookout':'action',\n 'Madman':'action',\n 'Mandarin':'action',\n 'Marauder':'action',\n 'Margrave':'action',\n 'Masquerade':'action',\n 'Menagerie':'action',\n 'Mercenary':'action',\n 'Militia':'action',\n 'Minion':'action',\n 'Mint':'action',\n 'Moneylender':'action',\n 'Monument':'action',\n 'Mountebank':'action',\n 'Mystic':'action',\n 'Navigator':'action',\n 'Noble Brigand':'action',\n 'Nomad Camp':'action',\n 'Oasis':'action',\n 'Oracle':'action',\n 'Pawn':'action',\n 'Pearl Diver':'action',\n 'Peddler':'action',\n 'Pillage':'action',\n 'Pirate Ship':'action',\n 'Poor House':'action',\n 'Possession':'action',\n 'Prince':'action',\n 'Princess':'action',\n 
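# multi-type cards are encoded as hyphen-joined type strings, e.g. 'action-attack' or 'treasure-reaction'\n    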
'Procession':'action',\n 'Rabble':'action',\n 'Rats':'action',\n 'Rebuild':'action',\n 'Remake':'action',\n 'Remodel':'action',\n 'Rogue':'action',\n 'Saboteur':'action',\n 'Sage':'action',\n 'Salvager':'action',\n 'Scavenger':'action',\n 'Scheme':'action',\n 'Scout':'action',\n 'Scrying Pool':'action',\n 'Sea Hag':'action',\n 'Shanty Town':'action',\n 'Sir Bailey':'action',\n 'Sir Destry':'action',\n 'Sir Martin':'action',\n 'Sir Michael':'action',\n 'Sir Vander':'action',\n 'Smithy':'action',\n 'Smugglers':'action',\n 'Spice Merchant':'action',\n 'Spy':'action',\n 'Squire':'action',\n 'Stables':'action',\n 'Steward':'action',\n 'Storeroom':'action',\n 'Swindler':'action',\n 'Thief':'action',\n 'Throne Room':'action',\n 'Torturer':'action',\n 'Tournament':'action',\n 'Trade Route':'action',\n 'Trading Post':'action',\n 'Transmute':'action',\n 'Treasure Map':'action',\n 'Treasury':'action',\n 'Tribute':'action',\n 'Trusty Steed':'action',\n 'University':'action',\n 'Upgrade':'action',\n 'Urchin':'action',\n 'Vagrant':'action',\n 'Vault':'action',\n 'Wandering Minstrel':'action',\n 'Warehouse':'action',\n 'Wishing Well':'action',\n 'Witch':'action',\n 'Young Witch':'action',\n 'Woodcutter':'action',\n 'Workshop':'action',\n 'Beggar':'action-reaction',\n 'Watchtower':'action-reaction',\n 'Horse Traders':'action-reaction',\n 'Moat':'action-reaction',\n 'Secret Chamber':'action-reaction',\n 'Trader':'action-reaction',\n 'Bank':'treasure',\n 'Cache':'treasure',\n 'Contraband':'treasure',\n 'Counterfeit':'treasure',\n 'Diadem':'treasure',\n 'Hoard':'treasure',\n 'Horn of Plenty':'treasure',\n 'Ill-Gotten Gains':'treasure',\n 'Loan':'treasure',\n 'Philosopher\\'s Stone':'treasure',\n 'Platinum':'treasure',\n 'Potion':'treasure',\n 'Quarry':'treasure',\n 'Royal Seal':'treasure',\n 'Silver':'treasure',\n 'Spoils':'treasure',\n 'Stash':'treasure',\n 'Talisman':'treasure',\n 'Venture':'treasure',\n 'Colony':'victory',\n 'Duchy':'victory',\n 'Duke':'victory',\n 'Fairgrounds':'victory',\n 'Farmland':'victory',\n 'Feodum':'victory',\n 'Gardens':'victory',\n 'Province':'victory',\n 'Silk Road':'victory',\n 'Vineyard':'victory',\n 'Caravan':'action-duration',\n 'Haven':'action-duration',\n 'Lighthouse':'action-duration',\n 'Merchant Ship':'action-duration',\n 'Outpost':'action-duration',\n 'Tactician':'action-duration',\n 'Wharf':'action-duration',\n 'Survivors':'action-ruins',\n 'Dame Josephine':'action-victory',\n 'Great Hall':'action-victory',\n 'Nobles':'action-victory',\n 'Island':'action-victory',\n 'Harem':'treasure-victory',\n 'Hovel':'shelter-reaction',\n 'Necropolis':'action-shelter',\n 'Tunnel':'victory-reaction',\n 'victory point chips':'vp-chip',\n 'Curse':'curse',\n 'Candlestick Maker':'action',\n 'Stonemason':'action',\n 'Doctor':'action',\n 'Masterpiece':'treasure',\n 'Advisor':'action',\n 'Herald':'action',\n 'Plaza':'action',\n 'Taxman':'action-attack',\n 'Baker':'action',\n 'Butcher':'action',\n 'Journeyman':'action',\n 'Merchant Guild':'action',\n 'Soothsayer':'action-attack',\n}\n\n# These help disambiguate actions taken based on the last action played\n# TODO IGG gains a copper to hand on play, but gains a curse to discard on buy\n# must disambiguate between the two (probably has its own edge case)\n# TODO Beggar on play vs reaction\nGAIN_TO_HAND = [\n 'Mine', 'Trading Post', 'Torturer', 'Explorer', 'Ill-Gotten Gains', 'Beggar',\n]\n\n# these are cards that gain from somewhere not in the supply (usually a trashing attack)\n# for these purposes we treat Spoils, Madman, as supply 
piles\n# TODO find out if Graverobber gain is from trash or from supply\nGAIN_FROM_ELSEWHERE = [\n 'Thief', 'Noble Brigand', 'Rogue', 'Graverobber'\n]\n\n# Treasure Map is not in this list because it's an odd edge case\n# It's handled explicitly elsewhere\n# TODO since extra play lines are removed, these occasionally may act weird\n# if they are Throned or Counterfeited, and in particular Procession is broken\n# FIX THIS\n# TODO handle Hermit\n# Hermit trashes from hand, or discard, or from play when no cards are bought\n# for now this ignore all of that.\n# TODO handle Death Cart\n# Death Cart trashes either itself or a card from hand\n# need to check between the two\n# Fortress is very special and handled back in the parser\n# TODO handle Knights\n# (both trash from play if Knight revealed, or from revealed cards, or for Dame Anna from hand)\n# for now ignore it all\nTRASHES_FROM_PLAY = ['Feast', 'Mining Village', 'Horn of Plenty', 'Hermit', 'Urchin', 'Death Cart', 'Procession', 'Counterfeit', 'Pillage', 'Embargo',\n 'Dame Anna', 'Dame Josephine', 'Dame Molly', 'Dame Natalie', 'Dame Sylvia',\n 'Sir Bailey', 'Sir Destry', 'Sir Martin', 'Sir Michael', 'Sir Vander',\n]\n\nTRASHES_FROM_REVEAL = [\n 'Thief', 'Swindler', 'Saboteur', 'Noble Brigand', 'Lookout', 'Pirate Ship', 'Loan', 'Rebuild', 'Rogue',\n 'Dame Anna', 'Dame Josephine', 'Dame Molly', 'Dame Natalie', 'Dame Sylvia',\n 'Sir Bailey', 'Sir Destry', 'Sir Martin', 'Sir Michael', 'Sir Vander',\n 'Doctor',\n]\n\n# TODO handle Sir Michael\nDISCARD_FROM_REVEAL = [\n 'Library', 'Hunting Party', 'Spy', 'Thief', 'Adventurer', 'Saboteur', 'Tribute', 'Navigator', 'Pirate Ship', 'Sea Hag', 'Noble Brigand', 'Scrying Pool', 'Golem', 'Loan', 'Rabble', 'Venture', 'Fortune Teller', 'Farming Village', 'Harvest', 'Jester', 'Duchess', 'Oracle', 'JackOfAllTrades', 'Cartographer', 'Sage', 'Ironmonger', 'Wandering Minstrel', 'Catacombs', 'Rebuild', 'Rogue', 'Survivors',\n 'Dame Anna', 'Dame Josephine', 'Dame Molly', 'Dame Natalie', 'Dame Sylvia',\n 'Sir Bailey', 'Sir Destry', 'Sir Martin', 'Sir Michael', 'Sir Vander',\n 'Advisor', 'Journeyman', 'Envoy', 'Lookout',\n]\n\nTOPDECKS_FROM_REVEAL = [\n 'Spy', 'Wishing Well', 'Scout', 'Pearl Diver', 'Lookout', 'Navigator', 'Apothecary', 'Scrying Pool', 'Rabble', 'Fortune Teller', 'Duchess', 'Oracle', 'Cartographer', 'Scavenger', 'Wandering Minstrel', 'Survivors', 'Doctor', 'Herald', 'Vagrant', 'JackOfAllTrades','Ironmonger'\n]\n\n# TODO all of these cards are triggered in cleanup\n# So, they may not be the resolving action anymore\n# need to handle this properly\nTOPDECKS_FROM_PLAY = ['Treasury', 'Herbalist', 'Alchemist']\n\n# Note - Horse Traders reaction works by luck\n# since for every Attack in the game, it is not in the list below,\n# so the HT revealed is correctly set aside\n# TODO make this robust and explicit\nSETS_ASIDE_FROM_DECK = ['Native Village']\n\n# TODO implement Watchtower, Mint on gain, Royal Seal topdeck, Walled Village, reactions...\n# In general, do effects that occur when the card is NOT being played\n# TODO implement Band of Misfits (oh my god please no)\n# TODO implement Black Market\n\nTOPDECKS_ON_BUY = ['Herald', 'Inn', 'Doctor']\n# Nomad Camp isn't actually needed here, the NC topdeck isn't logged\n# just here for completion\nTOPDECKS_ON_GAIN = ['Inn', 'Nomad Camp']\nTRASHES_ON_BUY = ['Doctor', 'Mint', 'Noble Brigand']\nDISCARD_ON_BUY = ['Doctor', 'Noble Brigand']\n\nRETURN_TO_SUPPLY_ON_PLAY = ['Spoils', 
'Madman']\n","sub_path":"parser/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":12469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"134454143","text":"# Copyright 2018, Jarsa Sistemas, S.A. de C.V.\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom odoo import _, api, fields, models\n\n\nclass ProjectBillingRequestWizard(models.TransientModel):\n _name = 'project.billing.request.wizard'\n\n line_ids = fields.One2many('project.billing.request.wizard.line', 'wiz_id')\n project_id = fields.Many2one('project.project')\n\n @api.model\n def _prepare_item(self, line):\n return {\n 'income_id': line.id,\n 'name': line.name,\n 'remaining_qty': line.remaining_qty,\n 'qty': line.remaining_qty,\n 'amount': line.amount,\n }\n\n @api.model\n def default_get(self, fields):\n res = super().default_get(fields)\n project = self.env['project.project'].browse(\n self._context.get('active_id'))\n lines = []\n for line in project.mapped('income_ids'):\n if line.remaining_qty <= 0:\n continue\n lines.append([0, 0, self._prepare_item(line)])\n res.update({\n 'line_ids': lines,\n 'project_id': self._context.get('active_id'),\n })\n return res\n\n @api.multi\n def create_billing(self):\n unit = self.env.ref('product.product_uom_unit')\n for rec in self:\n lines = []\n for line in rec.line_ids:\n ref = False\n active_order = False\n if line.qty == line.remaining_qty:\n ref = _(\n 'Total Billing of: Project: %s - Quantity: %s') % (\n self.project_id.name, line.qty)\n active_order = False\n elif line.qty < line.remaining_qty:\n ref = _(\n 'Partial Billing of: Project: %s - Quantity: %s') % (\n self.project_id.name, line.qty)\n active_order = True\n lines.append(\n (0, 0,\n {\n 'account_id': (\n self.env.user.company_id.product_id.\n property_account_income_id.id\n if self.env.user.company_id.product_id.\n property_account_income_id.id\n else\n self.env.user.company_id.product_id.\n categ_id.property_account_income_categ_id.id),\n 'ref': ref,\n 'price_unit': line.amount,\n 'product_uom_id': unit.id,\n 'quantity': line.qty,\n 'income_id': line.income_id.id,\n 'amount': (\n line.amount * line.qty),\n 'account_analytic_id': (\n self.project_id.analytic_account_id.id),\n 'has_active_order': active_order,\n })\n )\n res = self.env['analytic.billing.plan'].create({\n 'customer_id': self.project_id.partner_id.id,\n 'date': fields.Date.today(),\n 'project_id': self.project_id.id,\n 'currency_id': self.env.user.company_id.currency_id.id,\n 'analytic_billing_plan_line_ids': lines,\n })\n return {\n 'name': _('Billing Request'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'analytic.billing.plan',\n 'res_id': res.id,\n 'target': 'current',\n 'type': 'ir.actions.act_window',\n }\n\n\nclass ProjectBillingRequestWizardLine(models.TransientModel):\n _name = 'project.billing.request.wizard.line'\n\n wiz_id = fields.Many2one('project.billing.request.wizard')\n income_id = fields.Many2one('project.income')\n name = fields.Char()\n remaining_qty = fields.Float()\n amount = fields.Float()\n qty = fields.Float()\n","sub_path":"project_billing_plan/wizards/project_billing_request_wizard.py","file_name":"project_billing_request_wizard.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"203623584","text":"\"\"\"\nProgram: Card Dealer Program Challenge\nName: Jasmohan Bawa\nDate: January 23, 2017\nAbout: Performs all 
functionality of Card Dealer, but now no card will be repeated when dealing out cards\nParent Program: card_dealer\n\"\"\"\n\nimport random\n\nVALUES = (\"Ace of\", \"2 of\", \"3 of\", \"4 of\", \"5 of\", \"6 of\", \"7 of\", \"8 of\", \"9 of\", \"10 of\", \"Jack of\", \"Queen of\", \"King of\")\nSUITS = (\" Spades\", \" Clubs\", \" Hearts\", \" Diamonds\")\ncards_used = []\n\nto_continue = True\n\nwhile to_continue is True:\n\n    print(\"The number of Players multiplied by the Number of Cards per Player must be less than or equal to 52 in order for everyone to receive a card\")\n    max_reached = True\n    \n    while max_reached is True:\n        hands = int(input(\"Please enter the number of players playing: \"))\n        cards = int(input(\"Please enter the number of cards per player: \"))\n        if(hands * cards) <= 52:\n            max_reached = False\n        else:\n            print(\"Please enter a set of values that are less than or equal to 52 when multiplied\\n\")\n    \n    cards_used = []    \n    for i in range(0, hands):\n        print(\"\\nHand \", (i + 1))\n        \n        for j in range(0, cards):\n            \n            is_added = False\n            \n            while is_added is False:\n                current_card = (random.choice(VALUES) + random.choice(SUITS))\n                if current_card not in cards_used:\n                    print(current_card)\n                    cards_used.append(current_card)\n                    is_added = True\n            \n    user_continue = input(\"\\nWould you like to continue? (y/n)\")\n    correct_response = False\n    \n    while correct_response is False:\n        if(user_continue.lower() == \"y\"):\n            correct_response = True\n            to_continue = True\n            \n        elif(user_continue.lower() == \"n\"):\n            correct_response = True\n            to_continue = False\n            \n        else:\n            correct_response = False\n            print(\"Invalid input, please type: Y or N\")\n            user_continue = input(\"\\nWould you like to continue? (y/n)\")\n    \nprint(\"Come Back Soon!!!\")","sub_path":"Lab4/no_cheating.py","file_name":"no_cheating.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"90474404","text":"#!/usr/bin/python3\n\nimport mysql.connector\nimport datetime\n\ndef connect():\n    # Connect to the local database\n    db = mysql.connector.connect(\n        host=\"localhost\",\n        user=\"root\",\n        passwd=\"V8eOFR%_\",\n        database=\"employee\"\n    )\n    return db\n    \ndef execute(query):\n    # Execute a generic query and return the results\n    try:\n        db = connect()\n        cursor = db.cursor()\n        cursor.execute(query)\n        result = cursor.fetchall()\n        cursor.close()\n        db.close()\n        return result\n    except mysql.connector.Error as err:\n        print('[-] An error occurred while executing the following query: {} - {}'.format(query, type(err)))\n        # Log to a file in case an error occurs\n        f = open('errors.log', 'a')\n        f.write('ERROR: {}\\nQuery : {}\\n'.format(err, query))\n        f.close()\n        return None\n\ndef castDate(date):\n    # Cast the date type coming from MySQL to datetime for MongoDB\n    dt = datetime.datetime.combine(date, datetime.datetime.min.time())\n    return dt\n\ndef castDateFields(document):\n    # Cast all date-typed fields to datetime\n    for key in document:\n        if type(document[key]) is datetime.date:\n            document[key] = castDate(document[key])\n    return document\n\ndef getManagerTitle(deptManager, fromDate, toDate):\n    # Utility to map title dates to dept_manager dates,\n    # in order to know whether the employee was a manager while holding that title\n    for row in deptManager:\n        if(row[2] == fromDate and row[3] == toDate):\n            return row\n    \ndef dump_employee(empNo):\n    result = execute('SELECT * from employees WHERE emp_no = 
{}'.format(empNo))\n    result = result[0]\n    employee = {\n        \"_id\": result[0],\n        \"emp_no\": result[0],\n        \"birth_date\": result[1],\n        \"first_name\": result[2],\n        \"last_name\": result[3],\n        \"gender\": result[4],\n        \"hire_date\": result[5]\n    }\n    titles = dump_titles(empNo)\n    if titles is not None:\n        employee[\"titles\"] = titles\n    salaries = dump_salaries(empNo)\n    if salaries is not None:\n        employee[\"salaries\"] = salaries\n    employee = dump_depts(empNo, employee)\n    return castDateFields(employee)\n\ndef dump_titles(empNo):\n    try:\n        # Fetch an employee's titles\n        result = execute('SELECT * FROM titles WHERE emp_no = {}'.format(empNo))\n        # Fetch the dates when the employee was a manager\n        mgmt = execute('SELECT * FROM dept_manager WHERE emp_no = {}'.format(empNo))\n        titles = []\n        for row in result:\n            title = {\n                \"title\": row[1],\n                \"from_date\": row[2],\n                \"to_date\": row[3]\n            }\n            title[\"isManager\"] = True if getManagerTitle(mgmt, row[2], row[3]) is not None else False\n            titles.append(castDateFields(title))\n        return titles\n    except:\n        return []\n\ndef dump_salaries(empNo):\n    # Fetch the salaries\n    try:\n        result = execute('SELECT * FROM salaries WHERE emp_no = {}'.format(empNo))\n        salaries = []\n        for row in result:\n            salary = {\n                \"salary\": row[1],\n                \"from_date\": row[2],\n                \"to_date\": row[3]\n            }\n            salaries.append(castDateFields(salary))\n        return salaries\n    except:\n        return []\n\ndef dump_depts(empNo, document):\n    # Fetch the employee's department history, sorted by date\n    result = execute('SELECT * FROM dept_emp dpe INNER JOIN departments d ON dpe.dept_no = d.dept_no WHERE emp_no = {} ORDER BY to_date'.format(empNo))\n    dept_history = []\n    for row in result:\n        dept = {\n            \"dept_name\": row[5],\n            \"dept_no\": row[1],\n            \"from_date\": row[2],\n            \"to_date\": row[3]\n        }\n        # Check whether the year is 9999 = the current position, otherwise add it to the history\n        if(row[3].year == 9999):\n            # Keep only the necessary fields in current_dept\n            dept.pop(\"to_date\", None)\n            document[\"current_dept\"] = dept\n        else:\n            dept_history.append(castDateFields(dept))\n    if(len(dept_history) > 0):\n        document[\"dept_history\"] = dept_history\n    if not \"current_dept\" in document:\n        # If no current_dept was found, the employee no longer works for the company\n        document[\"current_dept\"] = {\n            \"dept_name\": \"No longer employed\",\n            \"dept_no\": \"d000\",\n            \"from_date\": result[-1][3] # employment end date (last row, to_date field)\n        }\n    document[\"current_dept\"] = castDateFields(document[\"current_dept\"]) # Cast the date types to datetime\n    return document\n\ndef dump_employees_ids():\n    # Utility to fetch a list of employee ids so they can be iterated over later\n    result = execute('SELECT emp_no FROM employees')\n    print(result)\n    output = open('ids.txt', 'w')\n    ids = []\n    for row in result:\n        output.write(\"%s\\n\" % row[0])\n        ids.append(row[0])\n    output.close()\n    return ids\n","sub_path":"A5/cloud/scalability/utils/dumper.py","file_name":"dumper.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"83250245","text":"import math\nimport statistics\nimport numpy as np\nimport operator\nimport pandas as pd\n\n\n#*------- INDICATOR UTILS -------*#\n\n\ndef get_volume_score(volumes):\n    \"\"\"\n    Considers volumes when scoring for oversold pairs\n\n    Args:\n        volumes - Volumes to score\n    \"\"\"\n\n    volume_score = 0\n    possible_outlier = get_outlier(volumes)\n    \n    # Remove 
outlier high volumes\n if possible_outlier is not None:\n volumes = [x for i, x in enumerate(volumes) if i != possible_outlier]\n\n for entry in volumes:\n volume_score += normalise_datapoint(entry, min(volumes), max(volumes))\n\n return volume_score\n\n\ndef get_outlier(all_values):\n \"\"\"\n Checks whether the max value of a list of values is an \n outlier and returns its index if it is. If there is no outlier this \n function returns None\n \"\"\"\n\n max_index, max_value = max(enumerate(all_values), key=operator.itemgetter(1))\n over_50 = max_value / 2\n\n if max_index > 0 and max_index < len(all_values) - 1:\n if all_values[max_index - 1] < over_50 and all_values[max_index + 1] < over_50:\n return max_index\n \n elif max_index == 0:\n if all_values[1] < over_50 and all_values[2] < over_50:\n return max_index\n \n elif max_index == len(all_values) - 1:\n if all_values[-2] < over_50 and all_values[-3] < over_50:\n return max_index\n \n return None\n\n\ndef get_exponential_moving_average(yesterday, today, n):\n \"\"\"\n Returns the exponential moving average for a given previous EMA.\n The first instance of use will need to be the SMA.\n\n Args:\n yesterday - The previous EMA value\n today - Close price for the interval\n n - How far back to pull EMA from\n \"\"\"\n\n multiplier = 2 / (n + 1)\n return (today * multiplier) + (yesterday * (1 - multiplier))\n\n\ndef get_exponential_moving_average_line(closes, n):\n \"\"\"\n Generates the EMA for a given interval \"n\". Such a separate \n function is not required for SMA, as it can be done with a list \n comprehension\n\n Args:\n closes - Close prices\n n - Interval to get EMA for\n \"\"\"\n\n initial_value = get_simple_moving_average(n, closes, n)\n ema_line = [initial_value]\n\n for price in closes[n + 1:]:\n new_ema = get_exponential_moving_average(ema_line[-1], price, n)\n ema_line.append(new_ema)\n \n return ema_line\n\n\ndef get_standard_devs(index, moving_averages, n=20):\n \"\"\"\n Returns the standard deviation from a set of \"n\" moving averages\n Assumes that there are \"n\" previous MAs to build a std dev from\n\n Args:\n index - Index in \"CLOSE\" to get MA for\n moving_averages - List of MAs\n n - How far back to pull MA from (defaults to 20 entries)\n \"\"\"\n\n ma_entries = moving_averages[index: (index - n): -1]\n return statistics.stdev(ma_entries)\n\n\ndef get_gains_and_losses(closes):\n \"\"\"\n Gets all gains and losses for the provided close prices\n\n Args:\n closes - Close price points\n \"\"\"\n\n gains = []\n losses = []\n compare = closes[0]\n\n for price in closes[1:]:\n percentage = price / compare * 100\n \n if price >= compare:\n gains.append(100 - percentage)\n else:\n losses.append(100 - percentage)\n \n compare = price\n \n return (gains, losses)\n\n\ndef normalise_datapoint(entry, min_value, max_value):\n \"\"\"\n Normalises a datapoint between 0 and 1\n\n Args:\n entry - Entry to normalise\n min_value - Minimum value in the list\n max_value - Maximum value in the list\n \"\"\"\n\n numerator = (entry - min_value)\n return numerator / (max_value - min_value)\n\n","sub_path":"src/indicators/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"24020692","text":"from torch import nn\r\n# from utils import *\r\nimport torch.nn.functional as F\r\nfrom math import sqrt\r\n# from itertools import product as product\r\nimport torchvision\r\nimport torch\r\n\r\nDEVICE = 'cuda' if 
torch.cuda.is_available() else 'cpu'\r\n\r\n\r\n\r\n\r\ndef cxcy_to_xy(cxcy):\r\n \"\"\"\r\n Convert bounding boxes from center-size coordinates (c_x, c_y, w, h) to boundary coordinates (x_min, y_min, x_max, y_max).\r\n :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)\r\n :return: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)\r\n \"\"\"\r\n return torch.cat([cxcy[:, :2] - (cxcy[:, 2:] / 2), # x_min, y_min\r\n cxcy[:, :2] + (cxcy[:, 2:] / 2)], 1) # x_max, y_max\r\n\r\n\r\ndef cxcy_to_gcxgcy(cxcy, priors_cxcy):\r\n \"\"\"\r\n Encode bounding boxes (that are in center-size form) w.r.t. the corresponding prior boxes (that are in center-size form).\r\n For the center coordinates, find the offset with respect to the prior box, and scale by the size of the prior box.\r\n For the size coordinates, scale by the size of the prior box, and convert to the log-space.\r\n In the model, we are predicting bounding box coordinates in this encoded form.\r\n :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_priors, 4)\r\n :param priors_cxcy: prior boxes with respect to which the encoding must be performed, a tensor of size (n_priors, 4)\r\n :return: encoded bounding boxes, a tensor of size (n_priors, 4)\r\n \"\"\"\r\n priors_cxcy=priors_cxcy.to(DEVICE)\r\n\r\n # The 10 and 5 below are referred to as 'variances' in the original Caffe repo, completely empirical\r\n # They are for some sort of numerical conditioning, for 'scaling the localization gradient'\r\n # See https://github.com/weiliu89/caffe/issues/155\r\n return torch.cat([(cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:, 2:] / 10), # g_c_x, g_c_y\r\n torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5], 1) # g_w, g_h\r\n\r\n\r\ndef gcxgcy_to_cxcy(gcxgcy, priors_cxcy):\r\n \"\"\"\r\n Decode bounding box coordinates predicted by the model, since they are encoded in the form mentioned above.\r\n They are decoded into center-size coordinates.\r\n This is the inverse of the function above.\r\n :param gcxgcy: encoded bounding boxes, i.e. 
output of the model, a tensor of size (n_priors, 4)\r\n :param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4)\r\n :return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4)\r\n \"\"\"\r\n\r\n return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy[:, :2], # c_x, c_y\r\n torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1) # w, h\r\n\r\n\r\n\r\ndef xy_to_cxcy(xy):\r\n \"\"\"\r\n Convert bounding boxes from boundary coordinates (x_min, y_min, x_max, y_max) to center-size coordinates (c_x, c_y, w, h).\r\n :param xy: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)\r\n :return: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)\r\n \"\"\"\r\n return torch.cat([(xy[:, 2:] + xy[:, :2]) / 2, # c_x, c_y\r\n xy[:, 2:] - xy[:, :2]], 1) # w, h\r\ndef find_intersection(set_1, set_2):\r\n \"\"\"\r\n Find the intersection of every box combination between two sets of boxes that are in boundary coordinates.\r\n :param set_1: set 1, a tensor of dimensions (n1, 4)\r\n :param set_2: set 2, a tensor of dimensions (n2, 4)\r\n :return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)\r\n \"\"\"\r\n\r\n # PyTorch auto-broadcasts singleton dimensions\r\n # print ('intersection:',set_1.size(),set_2.size())\r\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\r\n\r\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\r\n\r\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\r\n\r\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)\r\ndef find_jaccard_overlap(set_1, set_2):\r\n \"\"\"\r\n Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates.\r\n :param set_1: set 1, a tensor of dimensions (n1, 4)\r\n :param set_2: set 2, a tensor of dimensions (n2, 4)\r\n :return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)\r\n \"\"\"\r\n\r\n # Find intersections\r\n\r\n\r\n set_2=set_2.to(DEVICE)\r\n # print ('set:\\n\\n', set_1, '\\n\\n', set_2)\r\n intersection = find_intersection(set_1, set_2) # (n1, n2)\r\n\r\n # Find areas of each box in both sets\r\n areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1]) # (n1)\r\n areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1]) # (n2)\r\n\r\n # Find the union\r\n # PyTorch auto-broadcasts singleton dimensions\r\n union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection # (n1, n2)\r\n\r\n return intersection / union # (n1, n2)\r\nclass MultiBoxLoss(nn.Module):\r\n \"\"\"\r\n The MultiBox loss, a loss function for object detection.\r\n This is a combination of:\r\n (1) a localization loss for the predicted locations of the boxes, and\r\n (2) a confidence loss for the predicted class scores.\r\n \"\"\"\r\n\r\n def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):\r\n super(MultiBoxLoss, self).__init__()\r\n self.priors_cxcy = priors_cxcy\r\n self.priors_xy = cxcy_to_xy(priors_cxcy)\r\n self.threshold = threshold\r\n self.neg_pos_ratio = neg_pos_ratio\r\n self.alpha = alpha\r\n\r\n self.smooth_l1 = nn.L1Loss()\r\n self.cross_entropy = nn.CrossEntropyLoss(reduce=False)\r\n\r\n def forward(self, predicted_locs, predicted_scores, 
boxes, labels):\r\n \"\"\"\r\n Forward propagation.\r\n :param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)\r\n :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)\r\n :param boxes: true object bounding boxes in boundary coordinates, a list of N tensors\r\n :param labels: true object labels, a list of N tensors\r\n :return: multibox loss, a scalar\r\n \"\"\"\r\n batch_size = predicted_locs.size(0)\r\n n_priors = self.priors_cxcy.size(0)\r\n n_classes = predicted_scores.size(2)\r\n # print('Batch Size',batch_size)\r\n assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)\r\n\r\n true_locs = torch.zeros((batch_size, n_priors, 4), dtype=torch.float).to(DEVICE) # (N, 8732, 4)\r\n true_classes = torch.zeros((batch_size, n_priors), dtype=torch.long).to(DEVICE) # (N, 8732)\r\n\r\n # For each image\r\n for i in range(batch_size):\r\n # print('\\n\\nBatch :',i)\r\n\r\n n_objects = boxes[i].size(0)\r\n # print (boxes[i])\r\n # print ('have :',n_objects,' objects')\r\n\r\n overlap = find_jaccard_overlap(boxes[i],\r\n self.priors_xy) # (n_objects, 8732)\r\n # print ('overlap:',overlap.size())\r\n\r\n # For each prior, find the object that has the maximum overlap\r\n overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0) # (8732)\r\n # print (object_for_each_prior,object_for_each_prior)\r\n\r\n # We don't want a situation where an object is not represented in our positive (non-background) priors -\r\n # 1. An object might not be the best object for all priors, and is therefore not in object_for_each_prior.\r\n # 2. All priors with the object may be assigned as background based on the threshold (0.5).\r\n\r\n # To remedy this -\r\n # First, find the prior that has the maximum overlap for each object.\r\n _, prior_for_each_object = overlap.max(dim=1) # (N_o)\r\n\r\n # Then, assign each object to the corresponding maximum-overlap-prior. (This fixes 1.)\r\n object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to(DEVICE)\r\n\r\n # To ensure these priors qualify, artificially give them an overlap of greater than 0.5. 
(This fixes 2.)\r\n overlap_for_each_prior[prior_for_each_object] = 1.\r\n\r\n # Labels for each prior\r\n label_for_each_prior = labels[i][object_for_each_prior] # (8732)\r\n # Set priors whose overlaps with objects are less than the threshold to be background (no object)\r\n label_for_each_prior[overlap_for_each_prior < self.threshold] = 0 # (8732)\r\n\r\n # Store\r\n true_classes[i] = label_for_each_prior\r\n\r\n # Encode center-size object coordinates into the form we regressed predicted boxes to\r\n true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy) # (8732, 4)\r\n\r\n # Identify priors that are positive (object/non-background)\r\n positive_priors = true_classes != 0 # (N, 8732)\r\n\r\n # LOCALIZATION LOSS\r\n\r\n # Localization loss is computed only over positive (non-background) priors\r\n loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors]) # (), scalar\r\n # print ('\\n\\nlocal loss',loc_loss)\r\n\r\n # Note: indexing with a torch.uint8 (byte) tensor flattens the tensor when indexing is across multiple dimensions (N & 8732)\r\n # So, if predicted_locs has the shape (N, 8732, 4), predicted_locs[positive_priors] will have (total positives, 4)\r\n\r\n # CONFIDENCE LOSS\r\n\r\n # Confidence loss is computed over positive priors and the most difficult (hardest) negative priors in each image\r\n # That is, FOR EACH IMAGE,\r\n # we will take the hardest (neg_pos_ratio * n_positives) negative priors, i.e where there is maximum loss\r\n # This is called Hard Negative Mining - it concentrates on hardest negatives in each image, and also minimizes pos/neg imbalance\r\n\r\n # Number of positive and hard-negative priors per image\r\n n_positives = positive_priors.sum(dim=1) # (N)\r\n n_hard_negatives = self.neg_pos_ratio * n_positives # (N)\r\n\r\n # First, find the loss for all priors\r\n conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1)) # (N * 8732)\r\n conf_loss_all = conf_loss_all.view(batch_size, n_priors) # (N, 8732)\r\n\r\n # We already know which priors are positive\r\n conf_loss_pos = conf_loss_all[positive_priors] # (sum(n_positives))\r\n # print('\\n\\nconfidence loss',conf_loss_pos)\r\n\r\n # Next, find which priors are hard-negative\r\n # To do this, sort ONLY negative priors in each image in order of decreasing loss and take top n_hard_negatives\r\n conf_loss_neg = conf_loss_all.clone() # (N, 8732)\r\n conf_loss_neg[positive_priors] = 0. 
# (N, 8732), positive priors are ignored (never in top n_hard_negatives)\r\n conf_loss_neg, _ = conf_loss_neg.sort(dim=1, descending=True) # (N, 8732), sorted by decreasing hardness\r\n hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to(DEVICE) # (N, 8732)\r\n\r\n hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1) # (N, 8732)\r\n\r\n conf_loss_hard_neg = conf_loss_neg[hard_negatives] # (sum(n_hard_negatives))\r\n # print ('hard',conf_loss_neg)\r\n\r\n # As in the paper, averaged over positive priors only, although computed over both positive and hard-negative priors\r\n conf_loss = (conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.sum().float() # (), scalar\r\n # print ('total:',conf_loss + self.alpha * loc_loss)\r\n # exit(0)\r\n # TOTAL LOSS\r\n\r\n return conf_loss , loc_loss\r\n\r\n\r\n","sub_path":"Tools/MultiBox.py","file_name":"MultiBox.py","file_ext":"py","file_size_in_byte":12042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"431843131","text":"#! /usr/bin/env python\r\n\r\n\"\"\"\r\n@author: Ajay Arunachalam\r\nCreated on: 25/10/2021\r\nTraining the forecasting and Nowcasting model\r\nVersion: 0.0.5\r\n\"\"\"\r\n\r\n\r\nimport torch\r\nfrom torch.utils.data import TensorDataset, DataLoader\r\nimport torch.optim as optim\r\nfrom .utility import *\r\nfrom .dpp import *\r\n#from .forecast_ml import *\r\nfrom .forecast_ml_extension import *\r\nfrom .denoise import *\r\nfrom .similarity import *\r\nfrom .gnn_layer import *\r\nfrom .stats import *\r\n\r\nclass Forecast:\r\n\r\n\tts = globals()\r\n\tfc = globals()\r\n\r\n\tselect_model = globals() # Possible values ['rnn','lstm', 'gru', 'em', etc]\r\n\tselect_user_path = globals() # Provide user_path './forecast_folder/'\r\n\tselect_scaler = globals() # Possible values ['minmax','standard','maxabs','robust']\r\n\tforecast_window = globals() # no. 
of timesteps/points to be used for the forecasting model and nowcasting period\r\n\r\n\thidden_dim = globals()\r\n\tlayer_dim = globals()\r\n\tbatch_size = globals()\r\n\tdropout = globals()\r\n\tn_epochs = globals()\r\n\tlearning_rate = globals()\r\n\tweight_decay = globals()\r\n\r\n\r\n\tdef set_variable(**kwargs):\r\n\t\tfor key, value in kwargs.items():\r\n\t\t\tprint(\"{0} = {1}\" .format(key,value))\r\n\r\n\t\tts = list(kwargs.values())[0]\r\n\t\tfc = list(kwargs.values())[1]\r\n\r\n\t\treturn ts, fc\r\n\r\n\tassert ts == ts\r\n\tassert fc == fc\r\n\r\n\tdef set_model_config(**kwargs):\r\n\r\n\t\tfor key, value in kwargs.items():\r\n\t\t\tprint(\"{0} = {1}\" .format(key,value))\r\n\r\n\t\tselect_model = list(kwargs.values())[0]\r\n\t\tselect_user_path = list(kwargs.values())[1]\r\n\t\tselect_scaler = list(kwargs.values())[2]\r\n\t\tforecast_window = list(kwargs.values())[3]\r\n\r\n\t\treturn select_model, select_user_path, select_scaler, forecast_window\r\n\r\n\tassert select_model == select_model\r\n\tassert select_user_path == select_user_path\r\n\tassert select_scaler == select_scaler\r\n\tassert forecast_window == forecast_window\r\n\r\n\tdef hyperparameter_config(**kwargs):\r\n\r\n\t\tfor key, value in kwargs.items():\r\n\t\t\tprint(\"{0} = {1}\" .format(key,value))\r\n\r\n\t\thidden_dim = list(kwargs.values())[0]\r\n\t\tlayer_dim = list(kwargs.values())[1]\r\n\t\tbatch_size = list(kwargs.values())[2]\r\n\t\tdropout = list(kwargs.values())[3]\r\n\t\tn_epochs = list(kwargs.values())[4]\r\n\t\tlearning_rate = list(kwargs.values())[5] \r\n\t\tweight_decay = list(kwargs.values())[6]\r\n\r\n\t\treturn hidden_dim, layer_dim, batch_size, dropout, n_epochs, learning_rate, weight_decay\r\n\r\n\tassert hidden_dim == hidden_dim\r\n\tassert layer_dim == layer_dim\r\n\tassert batch_size == batch_size\r\n\tassert dropout == dropout\r\n\tassert n_epochs == n_epochs\r\n\tassert learning_rate == learning_rate\r\n\tassert weight_decay == weight_decay\r\n\r\n\tdef forecast(df, ts, fc, opt, scaler, period:int, fq:str, select_scaler=select_scaler, ):\r\n\r\n\t\tdigit = ''.join(filter(str.isdigit, str(fq)))\r\n\t\tinterval = int(digit)\r\n\r\n\t\tff_df = Helper.make_future_df(df, ts, period, fq)\r\n\r\n\t\tprint(f'Forecast period dataframe: {ff_df.index}')\r\n\r\n\t\t#print(f'Forecast period dataframe: {ff_df.index.hour}')\r\n\r\n\t\t#cols=['hour','month','day','day_of_week','week_of_year']\r\n\r\n\t\tfrequency = ''.join([i for i in fq if not i.isdigit()])\r\n\r\n\t\t#if any(str(frequency).startswith(tuple(l)) for l in ['h', 'H', 's', 'S', 'min', 'MIN', 'n', 'N']) and interval is not None:\r\n\t\tif frequency in ['h', 'H', 's', 'S', 'min', 'MIN', 'n', 'N'] and interval is not None:\r\n\r\n\r\n\t\t#if str(fq)=='h' or str(fq)=='H' or str(fq) == 's' or str(fq) == 'S' or str(fq) == 'min' or str(fq) == 'MIN' or str(fq) == 'n' or str(fq) == 'N' or interval is not None: # hourly(h:m:s)\r\n\r\n\t\t\tff_full_features = Features.generate_date_time_features_hour(ff_df, ['hour','month','day','day_of_week','week_of_year'])\r\n\t\t\tff_full_features = Features.generate_cyclic_features(ff_full_features, 'hour', 24, 0)\r\n\t\t\tff_full_features = Features.generate_cyclic_features(ff_full_features, 'day_of_week', 7, 0)\r\n\t\t\tff_full_features = Features.generate_cyclic_features(ff_full_features, 'month', 12, 1)\r\n\t\t\tff_full_features = Features.generate_cyclic_features(ff_full_features, 'week_of_year', 52, 0)\r\n\r\n\t\t\tff_full_features = 
Features.generate_other_related_features(df=ff_full_features)\r\n\r\n\t\telif frequency in ['d', 'D', 'w', 'W', 'm', 'M', 'q', 'Q', 'QS', '2q', '2Q', 'HA', 'Y', 'y', 'A'] and interval is not None:\r\n\r\n\t\t#elif any(str(frequency).startswith(tuple(l)) for l in ['d', 'D', 'w', 'W', 'm', 'M', 'q', 'Q', 'QS', '2q', '2Q', 'HA', 'Y', 'y', 'A']) and interval is not None:\r\n\r\n\t\t#elif str(fq) == 'd' or str(fq) == 'D' or str(fq) == 'w' or str(fq) == 'W' or str(fq)=='m' or str(fq)=='M' or str(fq) == 'q' or str(fq) == 'Q' or str(fq) == 'QS' or str(fq) == '2q' or str(fq) == '2Q' or str(fq) == 'HA' or str(fq) == 'y' or str(fq) == 'Y' or str(fq) == 'A' or interval is not None: # Yearly(Daily, weekly, monthly, quater, semi-annual, annual)\r\n\r\n\t\t\tff_full_features = Features.generate_date_time_features_month(ff_df, ['month','day_of_week','week_of_year'])\r\n\t\t\tff_full_features = Features.generate_cyclic_features(ff_full_features, 'day_of_week', 7, 0)\r\n\t\t\tff_full_features = Features.generate_cyclic_features(ff_full_features, 'month', 12, 1)\r\n\t\t\tff_full_features = Features.generate_cyclic_features(ff_full_features, 'week_of_year', 52, 0)\r\n\r\n\t\t\tff_full_features = Features.generate_other_related_features(df=ff_full_features)\r\n\r\n\r\n\t\tX = ff_full_features\r\n\t\t\r\n\t\tinput_dim = len(X.columns)\r\n\t\t#X, y = Helper.predictor_outcome_split(df, target_col)\r\n\t\tX_arr = Helper.apply_transformation_forecast(X, select_scaler)\r\n\r\n\t\tunseen_loader = Helper.prepare_pytorch_data_forecast_df(X_arr)\r\n\r\n\t\tpredictions = opt.predict(\r\n\t\t\tunseen_loader,\r\n\t\t\tbatch_size=1,\r\n\t\t\tn_features=input_dim\r\n\t\t)\r\n\r\n\t\tff_result = Helper.forecast_window_inference(predictions, ff_df, scaler)\r\n\t\tprint(f'Forecast period predictions: {ff_result}')\r\n\r\n\t\tHelper.plot_forecast(ff_result, fc)\r\n\r\n\t\tforecasted_data = Helper.save_final_data(df, ff_result, ts, fc)\r\n\t\tff_full_features_ = pd.concat([ff_result, ff_full_features], axis=1)\r\n\t\treturn forecasted_data, ff_full_features, ff_full_features_\r\n\r\n\r\n\tdef train(df, target_col, split_ratio:float, select_model=select_model, select_scaler=select_scaler, forecast_window=forecast_window, hidden_dim=hidden_dim, layer_dim=layer_dim, batch_size=batch_size,dropout=dropout, n_epochs=n_epochs, learning_rate=learning_rate, weight_decay=weight_decay):\r\n\t\tfrom torch.utils.data import TensorDataset, DataLoader\r\n\r\n\t\tX_train, X_val, X_test, y_train, y_val, y_test = Helper.train_val_test_split(df, target_col, split_ratio) #'value', 0.2\r\n\t\tX_train_arr, X_val_arr, X_test_arr, y_train_arr, y_val_arr, y_test_arr, scaler = Helper.apply_transformation(X_train, X_val, X_test, y_train, y_val, y_test, select_scaler)\r\n\t\t\r\n\t\ttrain_loader, val_loader, test_loader, test_loader_one = Helper.prepare_pytorch_data(X_train_arr, X_val_arr, X_test_arr, y_train_arr, y_val_arr, y_test_arr, batch_size=batch_size)\r\n\t\t'''\r\n\r\n\t\tscaler = Helper.get_scaler(str(select_scaler)) #'minmax'\r\n\t\tX_train_arr = scaler.fit_transform(X_train)\r\n\t\tX_val_arr = scaler.transform(X_val)\r\n\t\tX_test_arr = scaler.transform(X_test)\r\n\r\n\t\ty_train_arr = scaler.fit_transform(y_train)\r\n\t\ty_val_arr = scaler.transform(y_val)\r\n\t\ty_test_arr = scaler.transform(y_test)\r\n\r\n\t\t#batch_size = 64\r\n\r\n\t\ttrain_features = torch.Tensor(X_train_arr)\r\n\t\ttrain_targets = torch.Tensor(y_train_arr)\r\n\t\tval_features = torch.Tensor(X_val_arr)\r\n\t\tval_targets = torch.Tensor(y_val_arr)\r\n\t\ttest_features = 
torch.Tensor(X_test_arr)\r\n\t\ttest_targets = torch.Tensor(y_test_arr)\r\n\r\n\t\ttrain = TensorDataset(train_features, train_targets)\r\n\t\tval = TensorDataset(val_features, val_targets)\r\n\t\ttest = TensorDataset(test_features, test_targets)\r\n\r\n\t\ttrain_loader = DataLoader(train, batch_size=batch_size, shuffle=False, drop_last=True)\r\n\t\tval_loader = DataLoader(val, batch_size=batch_size, shuffle=False, drop_last=True)\r\n\t\ttest_loader = DataLoader(test, batch_size=batch_size, shuffle=False, drop_last=True)\r\n\t\ttest_loader_one = DataLoader(test, batch_size=1, shuffle=False, drop_last=True)\r\n\t\t'''\r\n\r\n\t\tinput_dim = len(X_train.columns)\r\n\t\t# output_dim = 1\r\n\t\t# hidden_dim = 64\r\n\t\t# layer_dim = 3\r\n\t\t# batch_size = 64\r\n\t\t# dropout = 0.2\r\n\t\t# n_epochs = 5\r\n\t\t# learning_rate = 1e-3\r\n\t\t# weight_decay = 1e-6\r\n\r\n\t\toutput_dim = 1\r\n\t\thidden_dim = hidden_dim\r\n\t\tlayer_dim = layer_dim\r\n\t\tbatch_size = batch_size\r\n\t\tdropout = dropout\r\n\t\tn_epochs = n_epochs\r\n\t\tlearning_rate = learning_rate\r\n\t\tweight_decay = weight_decay\r\n\r\n\r\n\t\tmodel_params = {'input_dim': input_dim,\r\n\t\t\t\t\t\t'hidden_dim' : hidden_dim,\r\n\t\t\t\t\t\t'layer_dim' : layer_dim,\r\n\t\t\t\t\t\t'output_dim' : output_dim,\r\n\t\t\t\t\t\t'dropout_prob' : dropout}\r\n\r\n\t\tmodel = Helper.get_model(str(select_model), model_params) # 'lstm'\r\n\r\n\t\tloss_fn = nn.MSELoss(reduction=\"mean\")\r\n\t\toptimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)\r\n\t\tdevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n\t\tprint(f\"{device}\" \" is available.\")\r\n\r\n\r\n\t\topt = Optimization(device=device, model=model, loss_fn=loss_fn, optimizer=optimizer)\r\n\t\topt.train(train_loader, val_loader, batch_size=batch_size, n_epochs=n_epochs, n_features=input_dim)\r\n\t\topt.plot_losses()\r\n\r\n\t\tpredictions, values = opt.evaluate(\r\n\t\t\ttest_loader_one,\r\n\t\t\tbatch_size=1,\r\n\t\t\tn_features=input_dim\r\n\t\t)\r\n\r\n\t\t#scaler = Helper.get_scaler(select_scaler)\r\n\r\n\t\tdf_result = Helper.format_predictions(predictions, values, X_test, scaler)\r\n\t\tprint(f'Forecast testset predictions: {df_result}')\r\n\r\n\t\tresult_metrics, key_metrics = Helper.calculate_metrics(df_result)\r\n\t\tprint(f'Model Evaluations: {result_metrics}')\r\n\r\n\t\tprint(f'Model Evaluations: {key_metrics}')\r\n\r\n\t\tHelper.plot_metrics(result_metrics, key_metrics)\r\n\r\n\t\tdf_baseline = Helper.build_baseline_model(df, split_ratio, target_col) #df_feature, 0.2, 'value'\r\n\t\tbaseline_metrics = Helper.calculate_metrics(df_baseline)\r\n\r\n\t\tHelper.plot_predictions(df_result, df_baseline)\r\n\r\n\t\treturn opt, scaler\r\n\r\n\r\n\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"deep_xf/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"124975210","text":"from tornado.web import HTTPError\n\nclass API_exception(HTTPError):\n \n def __init__(self, status_code, code, message, errors=None, extra=None):\n '''\n :param status_code: int\n HTTP status code e.g. 
400\n :param code: int\n internal error code\n :param message: str\n :param errors: list of error\n :param extra: object\n Extra information for the exception.\n '''\n HTTPError.__init__(self, status_code, message)\n self.status_code = status_code\n self.code = code\n self.errors = errors\n self.message = message\n self.extra = extra\n\nclass Not_found(API_exception):\n\n def __init__(self, message=None):\n API_exception.__init__(self,\n status_code=404,\n code=500, \n message=message or 'the requested item was not found',\n errors=None,\n )\n\nclass Forbidden(API_exception):\n\n def __init__(self, message=None):\n API_exception.__init__(self,\n status_code=403,\n code=501, \n message=message or 'forbidden',\n errors=None,\n )\n\nclass Wrong_email_or_password_exception(API_exception):\n\n def __init__(self):\n API_exception.__init__(self,\n status_code=401,\n code=1000, \n message='wrong email and/or password',\n errors=None,\n )\n\nclass Validation_exception(API_exception):\n\n def __init__(self, errors):\n API_exception.__init__( \n self,\n status_code=400,\n code=1001,\n message='one or more fields failed validation',\n errors=errors,\n )\n\nclass Parameter_must_not_be_set_exception(API_exception):\n\n def __init__(self, message):\n API_exception.__init__(\n self,\n status_code=400,\n code=1002,\n message=message,\n )\n\nclass Parameter_missing_exception(API_exception):\n\n def __init__(self, message):\n API_exception.__init__(\n self,\n status_code=400,\n code=1003,\n message=message,\n )\n\nclass OAuth_unsuported_grant_type_exception(API_exception):\n\n def __init__(self, grant_type):\n API_exception.__init__(\n self,\n status_code=400,\n code=1006,\n message='unsupported grant_type \"{}\"'.format(grant_type),\n extra={\n 'grant_type': grant_type,\n }\n )\n\nclass OAuth_unknown_client_id_exception(API_exception):\n\n def __init__(self, client_id):\n API_exception.__init__(\n self,\n status_code=400,\n code=1007,\n message='unknown client_id: {}'.format(client_id),\n )\n\nclass OAuth_unauthorized_grant_type_level_request_exception(API_exception):\n\n def __init__(self, required_level, app_level):\n API_exception.__init__(\n self,\n status_code=403,\n code=1008,\n message='this app does not have authorization to make this type of grant type request, required level: {}, your app\\'s level: {}'.format(required_level, app_level),\n extra={\n 'app_level': app_level,\n 'required_level': required_level,\n }\n )\n\nclass Not_signed_in_exception(API_exception):\n\n def __init__(self):\n API_exception.__init__(self,\n status_code=401,\n code=1009, \n message='not signed in',\n )\n\nclass Restricted_access_exception(API_exception):\n\n def __init__(self, user_level, required_level):\n API_exception.__init__(self,\n status_code=403,\n code=1010, \n message='your access level: {}, is not high enough for the required level: {}'.format(user_level, required_level),\n extra={\n 'required_level': required_level,\n 'user_level': user_level,\n }\n )\n\nclass User_not_following_show(API_exception):\n\n def __init__(self):\n API_exception.__init__(self,\n status_code=400,\n code=1200, \n message='you do not follow this show',\n )\n\nclass User_has_not_watched_this_episode(API_exception):\n\n def __init__(self):\n API_exception.__init__(self,\n status_code=400,\n code=1300, \n message='you have not watched this episode',\n )\n\nclass Show_unknown(API_exception):\n\n def __init__(self):\n API_exception.__init__(\n self,\n status_code=400,\n code=1400,\n message='unknown show',\n )\n\nclass 
Show_external_field_must_be_specified_exception(API_exception):\n\n    def __init__(self):\n        API_exception.__init__(\n            self,\n            status_code=400,\n            code=1401,\n            message='the external field must be specified before updating the index field',\n        )\n\nclass Show_index_type_must_be_in_external_field_exception(API_exception):\n\n    def __init__(self, external_type):\n        API_exception.__init__(\n            self,\n            status_code=400,\n            code=1402,\n            message='Index type: \"{}\" must first be specified in the external field before adding it to the index field'.format(external_type),\n            extra={\n                'external_type': external_type,\n            }\n        )\n\nclass Show_external_duplicated(API_exception):\n\n    def __init__(self, external_title, external_id, show):\n        API_exception.__init__(\n            self,\n            status_code=400,\n            code=1403,\n            message='A show with the external title \"{}\" and id \"{}\" already exists'.format(external_title, external_id),\n            extra={\n                'show': show,\n                'external_title': external_title,\n                'external_id': external_id,\n            }\n        )\n\nclass User_unknown(API_exception):\n\n    def __init__(self):\n        API_exception.__init__(\n            self,\n            status_code=400,\n            code=1500,\n            message='unknown user',\n        )\n\n\nclass Episode_unknown(API_exception):\n\n    def __init__(self):\n        API_exception.__init__(\n            self,\n            status_code=400,\n            code=1600,\n            message='unknown episode',\n        )\n\nclass Elasticsearch_exception(API_exception):\n\n    def __init__(self, status_code, extra):\n        API_exception.__init__(\n            self,\n            status_code=status_code,\n            code=1700,\n            message='search error',\n            extra=extra\n        ) \n\nclass Sort_not_allowed(API_exception):\n    def __init__(self, sort):\n        API_exception.__init__(self,\n            status_code=400,\n            code=1800,\n            message='Sort by: \"{}\" is not allowed'.format(sort),\n            extra=[sort],\n        )\n\nclass Append_fields_not_allowed(API_exception):\n    def __init__(self, fields):\n        API_exception.__init__(self,\n            status_code=400,\n            code=1900,\n            message='Append fields: \"{}\" are not allowed'.format(','.join(fields)),\n            extra=fields,\n        )\n\nclass Image_external_duplicate(API_exception): \n    def __init__(self, duplicate_image):\n        API_exception.__init__(self,\n            status_code=400,\n            code=2000,\n            message='An image with the external name and id already exists',\n            extra=duplicate_image,\n        )\n\nclass Image_unknown(API_exception):\n    def __init__(self):\n        API_exception.__init__(self,\n            status_code=400,\n            code=2001,\n            message='unknown image',\n        )\n\nclass Image_set_wrong_type(API_exception):\n\n    def __init__(self, image_type, needs_image_type):\n        API_exception.__init__(self,\n            status_code=400,\n            code=2002,\n            message='the image could not be set because of a wrong type',\n            extra={\n                'is': image_type,\n                'needs': needs_image_type if isinstance(needs_image_type, list) \\\n                    else [needs_image_type],\n            }\n        )\n\nclass Image_no_data(API_exception):\n    def __init__(self):\n        API_exception.__init__(self,\n            status_code=400,\n            code=2003,\n            message='No image data assigned. 
Please upload an image.',\n ) \n \nclass File_upload_no_files(API_exception):\n\n def __init__(self):\n API_exception.__init__(self,\n status_code=400,\n code=2100,\n message='zero files was uploaded',\n )\n\nclass File_upload_unrecognized_image(API_exception):\n\n def __init__(self):\n API_exception.__init__(self,\n status_code=400,\n code=2101,\n message='unrecognized image type: please upload a JPG or PNG image',\n )","sub_path":"src/seplis/api/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":8819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"646243558","text":"\"\"\"\nSort numbers and always pick up the first W elements. Need to delete used elements in case there are duplicates.\n\"\"\"\nclass Solution(object):\n def isNStraightHand(self, hand, W):\n \"\"\"\n :type hand: List[int]\n :type W: int\n :rtype: bool\n \"\"\"\n length = len(hand)\n if length % W != 0: return False\n hand.sort()\n stack = []\n i = 0\n count = 0\n while i < len(hand):\n if (not stack) or (len(stack) < W and stack[-1] == hand[i] - 1):\n stack.append(hand[i])\n hand.pop(i)\n i -= 1\n i += 1\n if len(stack) == W:\n stack = []\n count += 1\n i = 0\n if count == length / W:\n return True\n return False\n","sub_path":"solution/python/846.py","file_name":"846.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"631084711","text":"''' import time\nimport psutil\n\ndef main():\n old_value = 0 \n\n while True:\n new_value = psutil.net_io_counters().bytes_sent + psutil.net_io_counters().bytes_recv\n\n if old_value:\n send_stat(new_value - old_value)\n\n old_value = new_value\n\n time.sleep(1)\n\ndef convert_to_gbit(value):\n return value/1024./1024.*8\n\ndef send_stat(value):\n print (\"%0.3f\" % convert_to_gbit(value)+\"MB/s\")\n\nmain() '''\n\n\nimport psutil as ps\nimport time\n\ndef main():\n old_value_sent = 0\n old_value_recieve=0 \n\n while True:\n new_value_sent = ps.net_io_counters().bytes_sent\n new_value_recieve = ps.net_io_counters().bytes_recv\n if old_value_sent:\n print(\"SENT:\",end=\"\")\n send_stat(new_value_sent-old_value_sent)\n if old_value_recieve:\n print(\"RECIEVED:\",end=\"\")\n send_stat1(new_value_recieve-old_value_recieve)\n old_value_sent = new_value_sent\n old_value_recieve = new_value_recieve\n time.sleep(1)\ndef convert_to_gbit(value):\n return value/1024./1024.*8\n\ndef send_stat(value):\n print(\"%0.3f\" % convert_to_gbit(value)+\"MB/s\",end=\" \")\ndef send_stat1(value):\n print(\"%0.3f\" % convert_to_gbit(value)+\"MB/s\")\n\nmain()","sub_path":"Bandwidth_monitor.py","file_name":"Bandwidth_monitor.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"177887570","text":"import pygame\nscreen = pygame.display.set_mode((640, 480))\n\nrunning = True\nx = 5\ng = 0\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n pygame.draw.circle(screen, (0, g, 0), (320, 240), 50)\n pygame.display.update()\n g += x\n if g>=255 or g<=0:\n x=x*-1\n\n\n\n\n\npygame.quit()\n","sub_path":"Computational Thinking/Pygame.py","file_name":"Pygame.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"544542093","text":"from sklearn.metrics import auc\nimport numpy as np\nfrom sklearn.metrics 
import precision_recall_curve\nimport sklearn.metrics as metrics\nimport sys\n\nthe_list=['0','1','2','3','4']\ny_long=np.zeros(0)\npred_long=np.zeros(0)\nfor the_id in the_list:\n y=np.genfromtxt(('test_gs.dat.'+the_id),delimiter=',')[:,1]\n pred=np.loadtxt(('prediction.dat.'+the_id))\n y_long=np.hstack((y,y_long))\n pred_long=np.hstack((pred,pred_long))\n\n\na=np.arange(len(y_long))\n\nF=open('auc_bootstrap.txt','w')\ni =0\nwhile (i<10000):\n ll = np.random.choice(a, size=a.shape, replace=True)\n y_tmp=y_long[ll]\n pred_tmp=pred_long[ll]\n fpr, tpr, thresholds = metrics.roc_curve(y_tmp, pred_tmp, pos_label=1)\n the_auc=metrics.auc(fpr, tpr)\n F.write('%.4f\\n' % the_auc)\n i=i+1\nF.close()\n\n","sub_path":"evaluation/evaluation_bootstrap.py","file_name":"evaluation_bootstrap.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"106039669","text":"\"\"\"Test GCPInterface Class.\"\"\"\nfrom interface.gcp import GCPInterface, \\\n new_create_permission_body, new_share_message\nfrom googleapiclient.discovery import Resource\nfrom unittest import mock, TestCase\n\n\nclass TestGCPInterface(TestCase):\n \"\"\"Test Case for GCPInterface class.\"\"\"\n\n def setUp(self):\n self.mock_drive = mock.MagicMock(Resource)\n self.gcp = GCPInterface(self.mock_drive,\n subject=\"team@ubclaunchpad.com\")\n\n def test_ensure_drive_permissions(self):\n # Mocks for files\n mock_files_get = mock.MagicMock()\n mock_files_get.execute = mock.MagicMock(return_value={\n \"parents\": [\n \"parent-drive\",\n ]\n })\n\n mock_files = mock.MagicMock()\n mock_files.get = mock.MagicMock(return_value=mock_files_get)\n\n # Mocks for permissions\n mock_perms_list_parent = mock.MagicMock()\n mock_perms_list_parent.execute = mock.MagicMock(return_value={\n \"permissions\": [\n {\n # should not be removed (inherited)\n \"id\": \"99\",\n \"emailAddress\": \"inherited-permission@ubclaunchpad.com\",\n },\n ]\n })\n mock_perms_list_target = mock.MagicMock()\n mock_perms_list_target.execute = mock.MagicMock(return_value={\n \"permissions\": [\n {\n # should not be removed or created (exists in email list)\n \"id\": \"1\",\n \"emailAddress\": \"not-team@ubclaunchpad.com\",\n },\n {\n # should be removed (does not exist in email list)\n \"id\": \"2\",\n # see gcp_utils.standardize_email\n \"emailAddress\": \"strat.Egy@ubclaunchpad.com\",\n },\n {\n # should not be removed (actor)\n \"id\": \"3\",\n \"emailAddress\": \"team@ubclaunchpad.com\",\n },\n {\n # should not be removed (inherited)\n \"id\": \"99\",\n \"emailAddress\": \"inherited-permission@ubclaunchpad.com\",\n },\n ]\n })\n mock_perms_create = mock.MagicMock()\n mock_perms_create.execute = mock.MagicMock(return_value={})\n mock_perms_delete = mock.MagicMock()\n mock_perms_delete.execute = mock.MagicMock(return_value={})\n\n def perms_list_effect(**kwargs):\n if kwargs['fileId'] == 'target-drive':\n return mock_perms_list_target\n if kwargs['fileId'] == 'parent-drive':\n return mock_perms_list_parent\n\n mock_perms = mock.MagicMock()\n mock_perms.list = mock.MagicMock(side_effect=perms_list_effect)\n mock_perms.list_next = mock.MagicMock(return_value=None)\n mock_perms.create = mock.MagicMock(return_value=mock_perms_create)\n mock_perms.delete = mock.MagicMock(return_value=mock_perms_delete)\n\n # Create Google Drive API\n self.mock_drive.files = mock.MagicMock(return_value=mock_files)\n self.mock_drive.permissions = mock.MagicMock(return_value=mock_perms)\n 
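# The call below should create a permission for robert@bobheadxi.dev,\n        # delete the stale permission id '2', and leave the actor's and the\n        # inherited permissions untouched, as the assertions afterwards verify.\n        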
self.gcp.ensure_drive_permissions('team', 'target-drive', [\n            'robert@bobheadxi.dev',\n            'not-team@ubclaunchpad.com',\n        ])\n\n        # initial parent search\n        mock_files.get.assert_called_with(fileId='target-drive',\n                                          fields=mock.ANY)\n        mock_files_get.execute.assert_called()\n        # perms listing\n        mock_perms.list.assert_has_calls([\n            mock.call(fileId='parent-drive',\n                      fields=mock.ANY),\n            mock.call(fileId='target-drive',\n                      fields=mock.ANY),\n        ])\n        mock_perms_list_parent.execute.assert_called()\n        mock_perms_list_target.execute.assert_called()\n        # one email already exists, share to the new one\n        mock_perms.create\\\n            .assert_called_with(fileId='target-drive',\n                                body=new_create_permission_body(\n                                    'robert@bobheadxi.dev'),\n                                emailMessage=new_share_message('team'),\n                                sendNotificationEmail=True)\n        mock_perms_create.execute.assert_called()\n        # one email should no longer be shared, it is removed\n        mock_perms.delete.assert_called_with(\n            fileId='target-drive', permissionId='2')\n        mock_perms_delete.execute.assert_called()\n","sub_path":"tests/interface/gcp_test.py","file_name":"gcp_test.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"507702087","text":"'''\r\nTask: three cooks make egg tarts at the same time; each tart they make goes into the basket.\r\n     If the basket is full, wait 3 seconds. Check whether it is full.\r\n     The egg tart basket holds 500 tarts.\r\n     Every customer has 3000 yuan; each egg tart costs 2 yuan.\r\n     Start selling egg tarts; when the basket runs short of tarts, wait 2 seconds, until the money runs out\r\n\r\n'''\r\nfrom threading import Thread\r\n# the basket holds 500 egg tarts\r\nbread=0\r\nimport time\r\nclass cook(Thread):\r\n    username=\"\"   # cook's name\r\n    count=0      # number of egg tarts made\r\n    def run(self) -> None:\r\n        global bread\r\n        while True:\r\n            if bread<500:\r\n                bread=bread+1\r\n                self.count=self.count+1\r\n                print(self.username,\"has made a total of\",self.count,\"egg tarts\")\r\n\r\n            elif bread==500:\r\n                time.sleep(3)\r\n\r\nclass customer(Thread):\r\n    username=\"\"\r\n    count=0\r\n    def run(self) -> None:\r\n        money =3000\r\n        global bread\r\n        while True:\r\n            if money>0:\r\n                if bread<=0:\r\n                    # the basket is empty: wait 2 seconds, as the task requires\r\n                    time.sleep(2)\r\n                    continue\r\n                bread=bread-1\r\n                money=money-2\r\n                self.count=self.count+1\r\n                print(self.username,\"has bought a total of\",self.count,\"egg tarts\")\r\n            else:\r\n                # money decreases by 2 from 3000, so it hits exactly 0 when it runs out\r\n                print(\"Insufficient balance!!\")\r\n                break\r\n\r\nc1=cook()\r\nc2=cook()\r\nc3=cook()\r\nc1.username = \"张三\"\r\nc2.username = \"李四\"\r\nc3.username = \"王五\"\r\n\r\nc1.start()\r\nc2.start()\r\nc3.start()\r\n\r\nk1=customer()\r\nk2=customer()\r\nk3=customer()\r\nk4=customer()\r\nk5=customer()\r\nk6=customer()\r\n\r\nk1.username=\"No.1\"\r\nk2.username=\"No.2\"\r\nk3.username=\"No.3\"\r\nk4.username=\"No.4\"\r\nk5.username=\"No.5\"\r\nk6.username=\"No.6\"\r\n\r\nk1.start()\r\nk2.start()\r\nk3.start()\r\nk4.start()\r\nk5.start()\r\nk6.start()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"多线程蛋挞.py","file_name":"多线程蛋挞.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"355538046","text":"#! 
/usr/bin/env python3\nfrom Sensor_Factory import sensors\nfrom SQL_queries import Sensors_Select\nimport Sensor_Factory\nfrom SQL_queries import Sensors_insert\nfrom SQL_queries import Sensors_select_type\nfrom SQL_queries import Sensors_select_id\nfrom SQL_queries import Sensors_select_port\nfrom SQL_queries import Sensors_Update\nfrom functools import partial\nfrom SQL_queries import SQLInsertQueries\nfrom configparser import ConfigParser\nimport threading\nimport collections\nimport concurrent.futures\nimport queue\nimport time\nfrom SQL_queries import SchemaLooper \n\nportsTaken = [[\"ttyACM0\",False],[\"ttyACM1\",False],[\"ttyUSB0\",False],[\"ttyUSB1\",False]]\n\nclass SensorThreader(threading.Thread):\n \"\"\"\n Class to move each sensor object operations into its own thread.\n This also allows each output from the sensor to be moved into a queue which acts as a funnel to insert the outputs into a database.\n This avoids currupting the data and the need to avoid locking all threads except the one which most recently recieved data, \n as the whole database in sqlite3 gets locked when data is inserted.\n \"\"\"\n def __init__(self, name):\n super().__init__()\n self.name = name\n\n def sensorThread(self, queue, event, sensorObject, sensorType, sensorID):\n \"\"\"\n This is a thread that will remain active until an 'end event' has been triggered.\n It will continously read data from the sensor that has been passed in, and put that data into a queue.\n \"\"\"\n while not event.is_set():\n try:\n line = next(sensorObject)\n sensorData = [sensorType, line, sensorID]\n queue.put(sensorData)\n except Exception as message:\n print(message)\n print(type(message))\n \n\n \n print(\"sensorThread {} ID:{} received end event. Exiting.\".format(sensorType, sensorID))\n\ndef DatabaseAccessor(queue, event):\n \"\"\"\n Thread which reads the next item in the queue and sends it to the function SQLFinder which inserts it into the database.\n This thread is always active until the 'end event' is trigered, and the queue is empty.\n \"\"\"\n \n while not event.is_set() or not queue.empty():\n sensorData = queue.get()\n # sensorType line of data unique sensor id\n SQLFinder(sensorData[0], sensorData[1], sensorData[2])\n\ndef SQLFinder(sensor, line, sensorID):\n \"\"\"\n This function searches through the implemented sql insert statements to find the one for the sensor passed in.\n If a match is found, it then inserts the line of data into the sql table.\n \"\"\"\n # Check the implemented list of queries and find the one implemented for the current sensor.\n for queryIndex in range(len(SQLInsertQueries)):\n if(SQLInsertQueries[queryIndex][0] == sensor):\n newInsert = partial(SQLInsertQueries[queryIndex][1])\n newInsert(line, sensorID)\n print(\"Sensor {}, output {}\".format(sensor,line))\n\n\ndef AddSensor(newSensor, port, uniqueName):\n # print(\"Please enter the names of the sensors you wish to record data.\")\n # print(\"Type 'Done' once you have entered all the sensors you wish you use.\")\n print(\"Thank you for adding this sensor\")\n\n requestedSensor = newSensor\n requestedSensor = requestedSensor.upper()\n\n # Read in sensors from user and store them in the config file.\n portIndex = portFinder(port)\n if(portIndex != \"nope\"):\n f= open('Config.ini', 'a')\n f.write('\\n\\n'+'['+requestedSensor + '_' + uniqueName+']'+'\\n'+'sensor = '+requestedSensor)\n f.close()\n\n Sensors_insert(requestedSensor, port, uniqueName)\n\n import os \n import psutil\n import logging\n \n try:\n p = 
\ndef AddSensor(newSensor, port, uniqueName):\n    # print(\"Please enter the names of the sensors you wish to record data.\")\n    # print(\"Type 'Done' once you have entered all the sensors you wish you use.\")\n    print(\"Thank you for adding this sensor\")\n\n    requestedSensor = newSensor\n    requestedSensor = requestedSensor.upper()\n\n    # Read in sensors from user and store them in the config file.\n    portIndex = portFinder(port)\n    if(portIndex != \"nope\"):\n        f= open('Config.ini', 'a')\n        f.write('\\n\\n'+'['+requestedSensor + '_' + uniqueName+']'+'\\n'+'sensor = '+requestedSensor)\n        f.close()\n\n        Sensors_insert(requestedSensor, port, uniqueName)\n\n        import os \n        import psutil\n        import logging\n        \n        try:\n            p = psutil.Process(os.getpid())\n            return(p)\n            # for handler in p.open_files() + p.connections():\n            #     os.close(handler.fd)\n        except Exception as e:\n            logging.error(e)\n        #     python = sys.executable\n        #     os.execl(python, python, *sys.argv)        \n    else:\n        print(\"That port is already taken. Please check your port selection again.\")\n        return \"Not happy\"\n\ndef portFinder(port):\n    for portIndex in range(len(portsTaken)):\n        if(portsTaken[portIndex][1] == False):\n            if(portsTaken[portIndex][0] == port):\n                portsTaken[portIndex][1] = True\n                return portsTaken[portIndex][0]\n    return \"nope\"\n\ndef Main():\n    \"\"\"\n    The main function which is run when the program starts up.\n    It reads in the sensors the user requested, approves them, and checks them against the sensors already set up in the database\n    (in case the software is being restarted).\n    After ensuring all the sensors are set up in the database, it sends each sensor object into its own thread to read data.\n    \"\"\"\n\n    portsTaken = [[\"ttyACM0\",False],[\"ttyACM1\",False],[\"ttyUSB0\",False],[\"ttyUSB1\",False]]\n    import os\n    import sys\n    import psutil\n    import logging\n    print(psutil.Process(os.getpid()))\n\n    SchemaLooper()\n\n    requestedSensorList = []\n    configSensorTypeIndex = {'BB3':0, 'BB9':0, 'BB':0, 'NTU':0, 'GPS1':0, 'GPS_ublox7':0, 'RTK':0}\n\n    # Get all the requested sensors in the config file into a list.\n    parser = ConfigParser()\n    parser.read('Config.ini')\n    for each_section in parser.sections():\n        if(each_section == \"database\" or each_section == \"UserRequest\"):\n            pass\n        elif(each_section == \"sensorCounters\"):\n            configSensorTypeIndex['BB3'] = parser.get(each_section, 'BB3')\n            configSensorTypeIndex['BB9'] = parser.get(each_section, 'BB9')\n            configSensorTypeIndex['BB'] = parser.get(each_section, 'BB')\n            configSensorTypeIndex['NTU'] = parser.get(each_section, 'NTU')\n            configSensorTypeIndex['GPS1'] = parser.get(each_section, 'GPS1')\n            configSensorTypeIndex['GPS_ublox7'] = parser.get(each_section, 'GPS_ublox7')\n            configSensorTypeIndex['RTK'] = parser.get(each_section, 'RTK')\n        else:\n            requestedSensorList.append(parser.get(each_section, 'sensor'))\n\n    sensorTypeList = []\n\n    # Sort through the list of sensors to find ones which have been implemented in the software, and create objects of those sensors.\n    for currentSensor in requestedSensorList:\n        implementation = False\n        # when making the sensors, check to see if they have been implemented in the factory pattern.\n        for singleSensor in sensors: \n            # If the sensor has been implemented, then make the object, perform the first reading to instantiate it, then add it to a sensor list.\n            if(currentSensor == singleSensor):\n                implementation = True\n                singleSensor = singleSensor.upper()\n                sensorTypeList.append(singleSensor)\n        if(implementation == False):\n            print(\"The sensor {} has not been implemented yet.\".format(currentSensor))\n    # Get sensors that are already in the database and store them in a list for cross-referencing.\n    currentSensors = Sensors_select_type()\n    newSensorList = []\n    # (the original compared type(...) against the string 'NoneType', which is always False)\n    if(currentSensors is None or not currentSensors):\n        print(\"No sensors in database.\")\n    else:\n        for sensor in currentSensors:\n            newSensorList.append(sensor[0])\n    \n    # Make a copy of the sensors requested to iterate through\n    # then sort through the ones made in the database compared to the ones in the config file.\n    # If a sensor does exist in both then remove it from the list of approved sensors.\n    # The remaining sensors will then be added to the database.
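\n    # (Equivalently, the cross-reference below is a set difference, e.g.\n    # approved = [s for s in sensorTypeList if s not in newSensorList] -- shown only\n    # for illustration; the loop below also prunes newSensorList in place.)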
\n    approvedSensorsToAdd = sensorTypeList[::]\n    if(len(newSensorList) == 0):\n        print(\"There are no sensors in the database.\")\n    else:\n        for approvedSensor in sensorTypeList:\n            if(approvedSensor in newSensorList):\n                newSensorList.remove(approvedSensor)\n                approvedSensorsToAdd.remove(approvedSensor)\n            else:\n                print(\"{} is not in list of approved sensors.\".format(approvedSensor))\n                pass\n    sensorPorts = Sensors_select_port()\n\n    # Add approved sensors that are in the config file to the database.\n    for eachSensorIndex in range(len(approvedSensorsToAdd)):\n        Sensors_insert(approvedSensorsToAdd[eachSensorIndex], sensorPorts[eachSensorIndex])\n        print(\"{} has been Inserted into the database.\".format(approvedSensorsToAdd[eachSensorIndex]))\n\n    FinalListOfSensors = []\n    dbSensors = Sensors_Select()\n\n    for dbSensor in range(len(dbSensors)):\n        dbSensorTuple = dbSensors[dbSensor]\n        for approvedSensor in range(len(sensorTypeList)):\n            if(sensorTypeList[approvedSensor] == dbSensorTuple[1]):\n                FinalListOfSensors.append(dbSensorTuple)\n                del sensorTypeList[approvedSensor]\n                break\n    threadExecutor = concurrent.futures.ThreadPoolExecutor()\n    pipeline = queue.Queue(maxsize=1000000)\n    endEvent = threading.Event()\n    threadExecutor.submit(DatabaseAccessor, pipeline, endEvent) \n    # portsTaken = [[\"ttyACM0\",False],[\"ttyACM1\",False],[\"ttyUSB0\",False],[\"ttyUSB1\",False]]\n\n    for finalSensor in FinalListOfSensors:\n        newSensor = Sensor_Factory.factory(finalSensor[1],finalSensor[2])\n        portIndex = portFinder(finalSensor[2])  # look up the sensor's port, not the whole tuple\n        if(portIndex != \"nope\"):\n            sensor = newSensor.Reading()\n            line = next(sensor) \n            sensorObject = SensorThreader(finalSensor[1])\n            threadExecutor.submit(sensorObject.sensorThread, pipeline, endEvent, sensor, finalSensor[1], finalSensor[0])\n            print(\"this port is {}\".format(portIndex))\n            continue\n        else:\n            for portIndex in range(len(portsTaken)):\n                if(portsTaken[portIndex][1] == False):\n                    try:\n                        newSensor = Sensor_Factory.factory(finalSensor[1],portsTaken[portIndex][0])\n                        sensor = newSensor.Reading()\n                        line = next(sensor)\n                        portsTaken[portIndex][1] = True\n                        sensorObject = SensorThreader(finalSensor[1])\n                        threadExecutor.submit(sensorObject.sensorThread, pipeline, endEvent, sensor, finalSensor[1], finalSensor[0])\n                        Sensors_Update(portsTaken[portIndex][0], finalSensor[0])\n                        break\n                    except Exception as error:\n                        print(\"You have requested more sensors than there are plugged in, please check your port connections.\")\n                        print(\"Error was: {}\".format(error))\n                        #print(\"Nope. No idea. Explode!\")\n    \n    while True:\n        pass\n\nif __name__ == \"__main__\":\n    Main()\n\n\n# [database]\n# connection=/users/rsg/jkb/Documents/Monocle/sensordata.db\n# type=sqlite3","sub_path":"Sensor_Manager.py","file_name":"Sensor_Manager.py","file_ext":"py","file_size_in_byte":10991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
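The `while True: pass` that keeps `Main()` alive in the record above spins a CPU core doing nothing. A gentler shutdown sketch, reusing the record's own `endEvent` and `threadExecutor` names (illustrative, not the original code):

try:
    # Event.wait() sleeps instead of spinning; it returns True once the event is set.
    while not endEvent.wait(timeout=1.0):
        pass
except KeyboardInterrupt:
    endEvent.set()                      # sensorThread/DatabaseAccessor loops observe this
    threadExecutor.shutdown(wait=True)  # let queued work drain before exiting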
{"seq_id":"416950226","text":"import random\n\nfrom ebstorf.world_pb2 import Cell, Point\nfrom ebstorf.world_traverser_pb2_grpc import WorldTraverserServicer\nfrom everett.export.live.grpc import world_common\n\n\nclass WorldTraverser(WorldTraverserServicer):\n    _world_generator = None\n\n    def __init__(self, world_generator):\n        self._world_generator = world_generator\n\n    def GetWorld(self, request, context):\n        world_id = request.world_id\n        world = self._world_generator.get_world(world_id)\n        return world_common.stream_world(world_id, world)\n\n    def GetCell(self, request, context):\n        world_id = request.world_id\n        world = self._world_generator.get_world(world_id)\n        cell_id = request.id\n        return world_common.stream_cell(world, cell_id)\n\n    def GetNode(self, request, context):\n        world_id = request.world_id\n        world = self._world_generator.get_world(world_id)\n        node_id = request.id\n        return world_common.stream_node(world, node_id)\n\n    def GetStartingCell(self, request, context):\n        world_id = request.world_id\n        world = self._world_generator.get_world(world_id)\n        sample_size = 1\n        random_cell_ids = []\n        # reservoir-sample `sample_size` cell ids in one pass over the world's cells\n        # (the original iterated over an undefined name `nm` and used sample_size/iteration,\n        # which weights by the 0-based index)\n        for iteration, centre_node_id in enumerate(world.cells):\n            if len(random_cell_ids) < sample_size:\n                random_cell_ids.append(centre_node_id)\n            else:\n                if random.uniform(0, 1.0) < (sample_size / (iteration + 1)):\n                    random_cell_ids[random.randint(0, sample_size - 1)] = centre_node_id\n        return world_common.stream_cell(world, random_cell_ids[0])","sub_path":"everett/export/live/grpc/world_traverser.py","file_name":"world_traverser.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"175850787","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('restaurants', '0011_auto_20150731_1053'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='menutitle',\n            name='mealtype',\n            field=models.CharField(blank=True, max_length=120, null=True, choices=[(b'Breakfast', b'breakfast'), (b'Lunch', b'lunch'), (b'Dinner', b'dinner'), (b'Supper', b'supper')]),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"restaurants/migrations/0012_menutitle_mealtype.py","file_name":"0012_menutitle_mealtype.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"475649831","text":"import inspect\nimport math\n\ndebugging = True\ndef debug(*s): \n\tif debugging: \n\t\tprint(*s)\n\n\n#*****************PROBLEM 1 - DICTIONARIES**************************\n#create new dictionary(T) for totals\n#go through days of week\n\t#go through classes studied\n\t\t#if lecture already in T\n\t\t\t#add value to existing element to T\n\t\t#else\n\t\t\t#make new class and add to T\n\t\t\t#assign value to new element\n\t#print out L\ndef addDict(d):\n\tnd = {}\n\tfor day, classes in d.items():\n\t\tfor lecture, hours in classes.items():\n\t\t\tif lecture in nd.keys():\n\t\t\t\tnd[lecture] += hours\n\t\t\telse:\n\t\t\t\tnd[lecture] = hours\n\treturn nd\n\ndef testaddDict():\n\t\td 
= {'Mon':{'355':2,'451':1,'360':2},'Tue':{'451':2,'360':3},'Thu':{'355':3,'451':2,'360':3}, 'Fri':{'355':2},'Sun':{'355':1,'451':3,'360':1}} \n\t\ttrued = {'355': 8, '451': 8, '360': 9}\n\t\ttestd = addDict(d)\n\t\tif testd == trued:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\t\t\n\n#same as addDict, but now with an extra loop to go through L\t\t\t\ndef addDictN(L):\n\tnd = {}\t\n\tfor n in L:\n\t\tfor day, classes in n.items():\n\t\t\tfor lecture, hours in classes.items():\n\t\t\t\tif lecture in nd.keys():\n\t\t\t\t\tnd[lecture] += hours\n\t\t\t\telse:\n\t\t\t\t\tnd[lecture] = hours\n\treturn nd\n\t\t\ndef testaddDictN():\n\t\td = [{'Mon':{'355':2,'360':2},'Tue':{'451':2,'360':3},'Thu':{'360':3},'Fri':{'355':2}, 'Sun':{'355':1}},{'Tue':{'360':2},'Wed':{'355':2},'Fri':{'360':3, '355':1}},{'Mon':{'360':5},'Wed':{'451':4},'Thu':{'355':3},'Fri':{'360':6},'Sun':{'355':5}}]\n\t\ttrued = {'355': 16, '360': 24, '451': 6}\n\t\ttestd = addDictN(d)\n\t\tif testd == trued:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\n\t\t\n#*****************PROBLEM 2 - lIST COMPREHENSION**************************\t\t\n#same logic as addDict, but convert over to tuples + sort\n\t\t\ndef charCount(s):\n\td = {}\n\tfor i in s:\n\t\tif i in d.keys():\n\t\t\td[i] += 1\n\t\telif i == \" \":\n\t\t\tcontinue\n\t\telse:\n\t\t\td[i] = 1\n\tL = d.items()\n\treturn sorted(L,key=lambda x:(x[1],x[0]))\n\t\t\ndef testcharCount():\n\ts = 'Cpts355 --- Assign1'\n\ttests = charCount(s)\n\ttrues = [('1', 1), ('3', 1), ('A', 1), ('C', 1), ('g', 1), ('i', 1), ('n', 1), ('p', 1), ('t', 1), ('5', 2), ('-', 3), ('s', 3)]\n\tif tests == trues:\n\t\treturn True\n\telse:\n\t\treturn False\n\t\t\n\ndef charCount2(s):\n\td = {}\n\tfor i in s:\n\t\tif i in d.keys():\n\t\t\tcontinue\n\t\telif i == \" \":\n\t\t\tcontinue\n\t\telse:\n\t\t\td[i] = s.count(i)\n\tL = d.items()\n\treturn sorted(L,key=lambda x:(x[1],x[0]))\n\n\t\ndef testcharCount2():\n\ts = 'Cpts355 --- Assign1'\n\ttests = charCount2(s)\n\ttrues = [('1', 1), ('3', 1), ('A', 1), ('C', 1), ('g', 1), ('i', 1), ('n', 1), ('p', 1), ('t', 1), ('5', 2), ('-', 3), ('s', 3)]\n\tif tests == trues:\n\t\treturn True\n\telse:\n\t\treturn False\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n#*****************PROBLEM 3 - lIST + DICTIONARY**************************\t\t\n#reverse lists, return first instance.\n#check second element in tuple. 
if not found, return first element (recursion)\t\n\t\t\ndef lookupVal(L, k):\n\tfor i in reversed(L):\n\t\tfor x,y in i.items():\n\t\t\tif x == k:\n\t\t\t\treturn i[x]\n\t\t\telse:\n\t\t\t\tcontinue\n\treturn None\n\t\t\ndef testlookupVal():\n\tL1 = [{\"x\":1, \"y\":True, \"z\":\"found\"}, {\"x\":2}, {\"y\":False}]\n\ttests = lookupVal(L1, \"t\")\n\ttrues = None\n\tif tests == trues:\n\t\treturn True\n\telse:\n\t\treturn False\n\t\t\n\t\t\ndef lookupVal2(tL, k):\n\tdef lookupHelper(i, tL, k):\n\t\tif k in tL[i][1]:\n\t\t\treturn tL[i][1][k]\n\t\telif i == tL[i][0]:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn lookupHelper(tL[i][0], tL, k)\n\ti = len(tL) - 1\n\treturn lookupHelper(i, tL, k)\n\t\t\ndef testlookupVal2():\n\tL2 = [(0,{\"x\":0,\"y\":True,\"z\":\"zero\"}),(0,{\"x\":1}),(1,{\"y\":False}),(1,{\"x\":3, \"z\":\"three\"}),(2,{})]\n\ttests = lookupVal2(L2, \"t\")\n\ttrues = None\n\tif tests == trues:\n\t\treturn True\n\telse:\n\t\treturn False\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\n#*****************PROBLEM 4 - HIGHER ORDER FUNCTIONS**************************\t\t\n#https://stackoverflow.com/questions/2525845/proper-way-in-python-to-raise-errors-while-setting-variables\t\n\t\t\ndef funRun(d, name, args):\n\tif len(args) == len(inspect.getfullargspec(d[name]).args):\n\t\treturn d[name](*args)\n\telse:\n\t\traise TypeError(\"ERROR: Number of inputs do not match required number of arguments.\")\n\t\t\n\t\t\ndef testfunRun():\n\td = {\"add\": lambda x,y: (x+y), \"concat3\": lambda a,b,c:(a+\",\"+b+\",\"+c),\"mod2\": lambda n: (n % 2)}\n\ttests = funRun(d, \"mod2\", [40])\n\ttrues = 0\n\tif tests == trues:\n\t\treturn True\n\telse:\n\t\treturn False\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n#*****************PROBLEM 5 - RECUSION**************************\t\t\n#let recursion do the dirty work\n\t\t\ndef numPaths(m,n):\n\tif(m == 1 or n == 1):\n\t\treturn 1\n\telse:\n\t\treturn numPaths(m-1, n) + numPaths(m, n-1)\n\t\t\ndef testnumPaths():\n\ttests = numPaths(3,3)\n\ttrues = 6\n\tif tests == trues:\n\t\treturn True\n\telse:\n\t\treturn False\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n#*****************PROBLEM 6 - ITERATORS**************************\t\t\n#https://stackoverflow.com/questions/30254640/calculating-the-square-numbers-within-a-range-python\n\t\t\t\t\nclass iterSquares(object):\n\tdef __init__(self):\n\t\tself.current = 1\n\tdef __next__(self):\n\t\tresult = self.current\n\t\tself.current = (int(math.sqrt(result)) + 1) ** 2\n\t\treturn result\n\tdef __iter__(self):\n\t\treturn self\n\t\t\n\t\t\ndef numbersToSum(iNumbers, sum):\n\tL = []\n\tcount = 0\n\tpeek = iterSquares()\n\tpeek.__next__()\n\tfor n in iNumbers:\n\t\tif (count + n > sum):\n\t\t\tbreak\n\t\telif (count + n < sum):\n\t\t\tL.append(n)\n\t\t\tcount += n\n\t\t\tif (count + peek.__next__() >= sum):\n\t\t\t\tbreak\n\treturn L\n\t\t\n\t\ndef testnumbersToSum():\n\ts = iterSquares()\n\ttests = numbersToSum(s,55)\n\ttrues = [1,4,9,16]\n\ttest2 = numbersToSum(s,100)\n\ttrue2 = [25, 36]\n\t#print(tests)\n\t#print(test2)\n\tif tests == trues:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\n\n\n\n\n\n\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n#*****************PROBLEM 7 - STREAMS**************************\t\t\n#stream class given in class\nclass Stream(object):\n\tdef __init__(self, first, compute_rest, empty= False):\n\t\tself.first = first\n\t\tself._compute_rest = compute_rest\n\t\tself.empty = empty\n\t\tself._rest = None\n\t\tself._computed = False\n\n\t@property\n\tdef rest(self):\n\t\tassert not self.empty, 'Empty streams have no 
rest.'\n\t\tif not self._computed:\n\t\t\tself._rest = self._compute_rest()\n\t\t\tself._computed = True\n\t\treturn self._rest\n\n\t\t\ndef streamSquares(k):\n\tdef compute_rest():\n\t\treturn streamSquares((int(math.sqrt(k)) + 1) ** 2)\n\treturn Stream(first = k, compute_rest = compute_rest)\n\t\t\ndef teststreamSquares():\n\tsqStream = streamSquares(25)\n\tmyList = []\n\twhile sqStream.first < 225:\n\t\tmyList.append(sqStream.first)\n\t\tsqStream =sqStream.rest\n\ttrues = [25, 36, 49, 64, 81, 100, 121, 144, 169, 196]\n\tif myList == trues:\n\t\treturn True\n\telse:\n\t\treturn False\n\t\t\n\t\t\ndef evenStream(stream):\n\tdef evenCheck(x):\n\t\tif ((x % 2) == 0):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\tdef compute_rest():\n\t\treturn evenStream(stream.rest.rest)\n\treturn Stream(stream.first if evenCheck(stream.first) else stream.rest.first, compute_rest)\n\n\ndef testevenStream():\n\tevenS = evenStream(streamSquares(9))\n\tmyList = []\n\twhile evenS.first < 225:\n\t\tmyList.append(evenS.first)\n\t\tevenS =evenS.rest\n\ttrues = [16, 36, 64, 100, 144, 196]\n\tif myList == trues:\n\t\treturn True\n\telse:\n\t\treturn False\n\t\n\t\t\nif __name__ == '__main__':\n\tpassedMsg = \"%s passed\"\n\tfailedMsg = \"%s failed\"\n\tif testaddDict():\n\t\tprint(passedMsg % 'addDict')\n\telse:\n\t\tprint(failedMsg % 'addDict')\n\t\t\n\tif testaddDictN():\n\t\tprint(passedMsg % 'addDictN')\n\telse:\n\t\tprint(failedMsg % 'addDictN')\n\t\t\n\tif testcharCount():\n\t\tprint(passedMsg % 'charCount')\n\telse:\n\t\tprint(failedMsg % 'charCount')\n\t\t\n\tif testcharCount2():\n\t\tprint(passedMsg % 'charCount2')\n\telse:\n\t\tprint(failedMsg % 'charCount2')\n\t\t\n\tif testlookupVal():\n\t\tprint(passedMsg % 'lookupVal')\n\telse:\n\t\tprint(failedMsg % 'lookupVal')\n\t\t\n\tif testlookupVal2():\n\t\tprint(passedMsg % 'lookupVal2')\n\telse:\n\t\tprint(failedMsg % 'lookupVal2')\n\t\t\n\tif testfunRun():\n\t\tprint(passedMsg % 'funRun')\n\telse:\n\t\tprint(failedMsg % 'funRun')\n\t\t\n\tif testnumPaths():\n\t\tprint(passedMsg % 'numPaths')\n\telse:\n\t\tprint(failedMsg % 'numPaths')\n\t\t\n\tif testnumbersToSum():\n\t\tprint(passedMsg % 'numbersToSum')\n\telse:\n\t\tprint(failedMsg % 'numbersToSum')\n\t\n\tif teststreamSquares():\n\t\tprint(passedMsg % 'streamSquares')\n\telse:\n\t\tprint(failedMsg % 'streamSquares')\n\t\t\n\tif testevenStream():\n\t\tprint(passedMsg % 'evenStream')\n\telse:\n\t\tprint(failedMsg % 'evenStream')","sub_path":"HW3 - Python/HW3.py","file_name":"HW3.py","file_ext":"py","file_size_in_byte":8079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"505775533","text":"\"\"\"修改在岗状态为合作状态\n\nRevision ID: dc69f5c18cae\nRevises: 81eec4c7716f\nCreate Date: 2019-05-13 09:57:41.625052\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'dc69f5c18cae'\ndown_revision = '81eec4c7716f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('resume', sa.Column('cooperation_state', sa.Integer(), nullable=True))\n op.drop_column('resume', 'school')\n op.drop_column('resume', 'working')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.add_column('resume', sa.Column('working', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n    op.add_column('resume', sa.Column('school', mysql.VARCHAR(length=20), nullable=True))\n    op.drop_column('resume', 'cooperation_state')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/dc69f5c18cae_修改在岗状态为合作状态.py","file_name":"dc69f5c18cae_修改在岗状态为合作状态.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"159378675","text":"def any_odd(xs):\n\t\"\"\" Return True if there is an odd number in xs, a list of integers. \"\"\"\n\tfor v in xs:\n\t\tif v % 2 == 1:\n\t\t\treturn True\n\treturn False\n\n\ndef all_odd(xs):\n\t\"\"\" Return True if all the numbers are odd in xs, a list of integers. \"\"\"\n\tfor v in xs:\n\t\tif v % 2 == 0:\n\t\t\treturn False\n\treturn True\n\n\ndef three_odd(xs):\n\t\"\"\" Return True if at least 3 numbers are odd in xs, a list of integers. \"\"\"\n\tcount = 0\n\tfor v in xs:\n\t\tif v % 2 == 1:\n\t\t\tcount += 1\n\t\tif count == 3:\n\t\t\treturn True\n\treturn False\n\n\nodd1 = [1, 3, 5, 7, 8]\nodd2 = [1, 3, 5, 7, 9]\nodd3 = [1, 2, 5, 7, 9]\n\nodd4 = [1, 2, 3, 4, 6]\nodd5 = [1, 2, 3, 4, 7]\nodd6 = [1, 2, 3, 4, 9, 11]\n\n\nprint(all_odd(odd1))\nprint(all_odd(odd2))\nprint(all_odd(odd3))\n\nprint('-'*40)\n\nprint(three_odd(odd4))\nprint(three_odd(odd5))\nprint(three_odd(odd6))\n","sub_path":"Books/Python/How to Think Like a Computer Scientist - Chris Meyers/exercise_answers/ch04/odd_or_not.py","file_name":"odd_or_not.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
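The three predicate functions in the record above walk the list by hand; with plain builtins they collapse to one-liners. A small equivalent sketch (illustrative, not part of the exercise file):

def any_odd(xs):
    return any(v % 2 == 1 for v in xs)       # short-circuits on the first odd value

def all_odd(xs):
    return all(v % 2 == 1 for v in xs)       # short-circuits on the first even value

def three_odd(xs):
    return sum(v % 2 == 1 for v in xs) >= 3  # sum() counts True values

assert any_odd([2, 4, 5]) and not all_odd([1, 2]) and three_odd([1, 3, 5, 8])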
{"seq_id":"535947293","text":"from mpl_toolkits.mplot3d import Axes3D\r\nfrom pip._internal.utils.misc import enum  # (note: pip-internal helper; fragile, but kept as in the original)\r\nfrom sklearn.cluster import KMeans, DBSCAN\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nfrom sklearn import metrics\r\n\r\n\r\nclass Clustering:\r\n\r\n    Model = enum(KMEANS='Kmeans', HC='HC', DBSCAN='DBSCAN')\r\n\r\n    def __init__(self, data_set, no_clusters, plot_result=True):\r\n        self.data_set = data_set\r\n        self.no_clusters = no_clusters\r\n        self.plot_result = plot_result\r\n\r\n    def plot(self, clusters):\r\n        if len(self.data_set.data_points) > 1 and 1 < len(self.data_set.data_points[0]) <= 3:\r\n            if len(self.data_set.data_points[0]) == 2:\r\n                self.__plot2d(clusters)\r\n            else:\r\n                self.__plot3d(clusters)\r\n        else:\r\n            print('Too many dimensions for plotting')\r\n\r\n    def __plot2d(self, clusters):\r\n        plt.scatter(self.data_set.data_points[:, 0], self.data_set.data_points[:, 1] if len(\r\n            self.data_set.segmentation_vars) > 1 else self.data_set.data_points[:, 0], c=clusters.labels_,\r\n                    cmap='rainbow')\r\n        plt.yticks(())\r\n        plt.legend()\r\n        plt.show()\r\n\r\n    def __plot3d(self, clusters):\r\n        fig = plt.figure(1, figsize=(14, 13))\r\n        ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\r\n        ax.scatter(self.data_set.data_points[:, 0], self.data_set.data_points[:, 1], self.data_set.data_points[:, 2],\r\n                   c=clusters.labels_, edgecolor='k')\r\n\r\n        ax.w_xaxis.set_ticklabels([])\r\n        ax.w_yaxis.set_ticklabels([])\r\n        ax.w_zaxis.set_ticklabels([])\r\n        ax.set_xlabel(self.data_set.segmentation_vars[0])\r\n        ax.set_ylabel(self.data_set.segmentation_vars[1])\r\n        ax.set_zlabel(self.data_set.segmentation_vars[2])\r\n        ax.dist = 12\r\n        plt.show()\r\n\r\n    def exec(self, model=Model.KMEANS):\r\n        if model is self.Model.KMEANS:\r\n            self.model = KMeans(n_clusters=self.no_clusters, max_iter=10000)\r\n            self.clusters = self.model.fit(self.data_set.data_points)\r\n        elif model is self.Model.HC:\r\n            self.model = AgglomerativeClustering()\r\n            self.clusters = self.model.fit(self.data_set.data_points)\r\n        else:\r\n            self.model = DBSCAN(eps=0.0905, min_samples=5)\r\n            self.clusters = self.model.fit(self.data_set.data_points)\r\n        if self.plot_result:\r\n            self.plot(self.clusters)\r\n        return self.clusters\r\n\r\n    def evaluate_silhouette(self):\r\n        labels = self.model.labels_\r\n        return metrics.silhouette_score(self.data_set.data_points, labels)\r\n\r\n    def evaluate_calinski_harabaz_score(self):\r\n        labels = self.model.labels_\r\n        # renamed from calinski_harabaz_score in newer scikit-learn releases\r\n        return metrics.calinski_harabasz_score(self.data_set.data_points, labels)\r\n\r\n    @staticmethod\r\n    def clustering_to_dicc(clusters):\r\n        i = 0\r\n        dicc = {}\r\n        for c in clusters.labels_:\r\n            if c not in dicc:\r\n                dicc[c] = [i]\r\n            else:\r\n                dicc[c].append(i)\r\n            i += 1\r\n        return dicc\r\n\r\n    @staticmethod\r\n    def print_clustering(dicc):\r\n        for key in dicc:\r\n            print(\"Cluster \" + str(key) + str(dicc[key]))","sub_path":"data_science_example/data_science/segmentation/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"43480069","text":"#!/usr/bin/python3\n\"\"\"This module creates the square class\n\"\"\"\n\nfrom models.rectangle import Rectangle\nfrom models.base import Base\n\n\nclass Square(Rectangle):\n    \"\"\"Square Class inherits from Rectangle Class\n\n    Arguments:\n        Rectangle {Class}\n    \"\"\"\n    def __init__(self, size, x=0, y=0, id=None):\n        \"\"\"Init method\"\"\"\n        super().__init__(size, size, x, y, id)\n\n    def __str__(self):\n        \"\"\"__str__ creates my string representation of the instance\n\n        Returns:\n            [str]\n        \"\"\"\n        string = \"[Square] ({}) {}/{} - {}\".format(self.id, self.x, self.y, self.width)\n        return string\n\n    @property\n    def size(self):\n        \"\"\"Getter for size\"\"\"\n        return self.width\n\n    @size.setter\n    def size(self, value):\n        \"\"\"size is setter for size\n\n        Arguments:\n            value {int}\n        \"\"\"\n        self.width = value\n        self.height = value\n\n    def update(self, *args, **kwargs):\n        \"\"\"update implements two different ways to define or change\n        the attributes of an instance\n\n        Raises:\n            ValueError\n        \"\"\"\n        if args and len(args) > 0:\n            length = len(args)\n            counter = 0\n            for ar in args:\n                counter += 1\n                if counter < 5:\n                    if type(ar) is not int:\n                        raise ValueError(\"arg must be an integer\")\n            if length >= 1:\n                setattr(self, \"id\", args[0])\n            if length >= 2:\n                setattr(self, \"size\", args[1])\n            if length >= 3:\n                setattr(self, \"x\", args[2])\n            if length >= 4:\n                setattr(self, \"y\", args[3])\n        else:\n            for key, value in kwargs.items():\n                if (hasattr(self, key)):\n                    setattr(self, key, value)\n\n    def to_dictionary(self):\n        \"\"\"to_dictionary creates and returns the dictionary\n        representation of the instance\n\n        Returns:\n            [dic]\n        \"\"\"\n        attrs = [\"id\", \"size\", \"x\", \"y\"]\n        new_dict = {key: getattr(self, key) for key in attrs}\n        return new_dict\n","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"328639163","text":"from __future__ import print_function\nimport _winreg as winreg\nimport os\nimport re\nimport time\nimport fnmatch\nimport sys\n#import argparse\n\n_name = 
\"Bulk Windows Application Compatibility Settings Utility\"\n_version = \"1.0.0\"\n_description = \"Utility that searches for executable files matching a file mask in a given directory and adds them \" \\\n \"to the Windows application compatibility settings registry key\"\n\n#parser = argparse.ArgumentParser(prog=_name, version=_version)\n#parser.add_argument()\n\n#########\n# Begin configuration options\n#########\n\n# Quiet mode prevents writing to stdout, however error messages will still be written to stderr\nquiet = False\n\n# File masks to search for and add to the registry with application compatibility settings\ninclude_extensions = ['*.exe', '*.com']\n\n# base directory to recursively search for files matching the above file masks\n# If this is None, the current working directory will be used instead, but the variable MUST be set\n# working_dir = r'C:\\some\\dir\\change\\me'\nworking_dir = None\n\n# Hostname to connect remote registry to. If local registry is to be used, this value should be set to None\n# Remote registry connection capability is provided by the _winreg API and supported by the script but is\n# completely UNTESTED in the context of this tool.\nregistry_host = None\n#registry_host = someserver.somedomain.tld\n\n# Registry hive to connect to as a _winreg hive object. By default it is winreg.HKEY_CURRENT_USER and probably\n# shouldn't be changed.\n# This MUST be a winreg hive object. Simply using the hive name as a string WILL NOT WORK!\n# Using winreg.HKEY_LOCAL_MACHINE does not work under Windows 8 (With UAC on or off) and no other platforms\n# have been tested, YMMV\nregistry_hive = winreg.HKEY_CURRENT_USER\n\n# Registry key within the selected hive that stores the application compatibility settings. Do not modify unless\n# you understand what you're doing. 99.9% of users will not need to modify this EVER.\nregistry_key = r'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AppCompatFlags\\Layers'\n\n# Registry value data to set for all new and updated values\n# Registry format is something like: HIVE\\KEY\\KEY\\KEY\\KEY VALUE_NAME:VALUE_TYPE:VALUE_DATA\ncompat_properties = r'~ RUNASADMIN WIN7RTM'\n\n# Continue adding new registry entries or updating existing ones if there is an error writing the previous entry\nresume_on_error = True\n\n# Perform a backup of the registry hive+key before making any modifications.\n# Currently DOES NOT WORK!\ndo_registry_backup = False\n\n# Filename to store the registry backup. Defaults to the user's home directory in an aptly named & timestamped file\nbackup_filename = os.path.join(os.path.expanduser(\"~\"),\n 'AppCompat_Registry_Backup-'\n + time.strftime(\"%a_%d_%b_%Y_%H-%M-%S\")\n + '.reg')\ncontinue_if_backup_fails = True\n\n#########\n# End of configuration options\n#########\n\ndef print_message(message):\n if not quiet:\n print(\"[*] \" + str(message))\n\n\ndef print_error(message, stderr=True):\n if stderr:\n print(\"[!] \" + message, file=sys.stderr)\n else:\n print(\"[!] 
\" + message)\n\n# List to store files matching the include_extensions file masks\nexecutable_files = []\nexisting_registry_values = {}\nfiles_to_update = []\nfiles_to_add = []\nregistry_backup_completed = (False, None)\nvalues_added = 0\nvalues_updated = 0\nvalues_modified_total = 0\nvalues_skipped = 0\n\nif not working_dir:\n    working_dir = os.getcwd()\n\n# Translate our list of file extension masks to regular expressions for use with re.match()\ninclude_extensions_regex = r'|'.join([fnmatch.translate(x) for x in include_extensions])\n\n# Find files recursively in the working_dir that match the specified file masks\nprint_message(\"Recursing directory: \" + working_dir + \" for files matching: \" + ','.join(include_extensions))\nfor root, dirs, files in os.walk(working_dir):\n    executable_files += [f for f in [os.path.join(root, f) for f in files] if re.match(include_extensions_regex, f)]\n\n# Quit with an informative message if no matching files are found\n# (the original tested len(...) < 0, which can never be true)\nif executable_files is None or len(executable_files) == 0:\n    print_error(\"No files matching provided file mask(s) in the directory provided. Unable to continue.\")\n    print_error(\"Search directory: \" + working_dir)\n    print_error(\"File mask(s): \" + ','.join(include_extensions))\n    sys.exit(1)\n\n# Open the specified registry hive (remote or local; if local, registry_host should be None)\nif registry_host is None:\n    print_message(\"Connecting to local Windows registry.\")\nelse:\n    print_message(\"Connecting to Windows registry on host: \" + registry_host)\n\nwith winreg.ConnectRegistry(registry_host, registry_hive) as open_registry_hive:\n    # Open the registry key\n    with winreg.OpenKey(open_registry_hive, registry_key, 0, winreg.KEY_ALL_ACCESS) as open_registry_key:\n\n        if do_registry_backup:\n            if registry_backup_completed == (True, registry_key):\n                print_error(\"Registry backup for key \\\"\" + registry_key + \"\\\" has already been completed\")\n\n            print_message(\"Backing up registry key to file: \" + backup_filename)\n            try:\n                winreg.SaveKey(open_registry_key, backup_filename)\n            except WindowsError as ex:\n                print_error(\"Exception caught while saving registry key backup.\")\n                print_error(\"Key: \" + registry_key)\n                print_error(\"Filename: \" + backup_filename)\n                print_error(\"Exception message: \" + str(ex.message))\n                print_error(\"Exception filename: \" + str(ex.filename))\n                print_error(\"Windows error: \" + str(ex.winerror))\n\n                if not continue_if_backup_fails:\n                    print_error(\"Quitting due to failure to back up the registry key before modifying values.\")\n                    raise\n            else:\n                registry_backup_completed = (True, registry_key)\n                print_message(\"Registry key successfully saved to file\")\n\n        # Retrieve the existing values within the opened key and store them in existing_registry_values dict\n        for i in range(0, winreg.QueryInfoKey(open_registry_key)[1]):\n            value_pair = winreg.EnumValue(open_registry_key, i)\n            existing_registry_values[value_pair[0]] = value_pair[1]\n\n        # Check if any of the executables in executable_files already exists in a value; if so, put them in a list of\n        # tuples, and if not, also put them in a list\n        # List of tuples [(property,value)]\n        files_to_update = [(f, v) for f, v in existing_registry_values.iteritems() if f in executable_files]\n        # List of tuples [(property,value)]\n        files_to_add = [(f, compat_properties) for f in executable_files if f not in existing_registry_values.keys()]\n\n        # Insert new values & data from files_to_add\n        for (f, v) in files_to_add:\n            try:\n                #winreg.SetValue(open_registry_key,f,1,v)
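\n                # NOTE: SetValueEx's signature is (key, value_name, reserved, value_type, value);\n                # the third argument is a reserved flag and should be 0, not a type constant.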
\n                winreg.SetValueEx(open_registry_key, f, 0, winreg.REG_SZ, v)\n            except WindowsError as ex:\n                print_error(\"Exception caught while creating new registry subkey.\")\n                print_error(\"Key: \" + registry_key)\n                print_error(\"Name: \" + f)\n                print_error(\"Value: \" + v)\n                print_error(\"Exception message: \" + str(ex.message))\n                print_error(\"Exception filename: \" + str(ex.filename))\n                print_error(\"Windows error: \" + str(ex.winerror))\n\n                if resume_on_error is not True:\n                    raise ex\n            else:\n                values_added += 1\n                values_modified_total += 1\n                print_message(\"New value created for file: \" + f)\n        # Update existing values with new/updated (Or possibly the same) data\n        for (f, v) in files_to_update:\n            # Overwrite/update the original value data with our static value in compat_properties if they differ,\n            # else, don't process the value as no update is needed\n            if not v == compat_properties:\n                v = compat_properties\n            else:\n                # No update to the key is needed as new & existing values are identical\n                values_skipped += 1\n                print_message(\"Skipped updating value as existing and new values are identical: \" + f)\n                continue\n\n            try:\n                winreg.SetValueEx(open_registry_key, f, 0, winreg.REG_SZ, v)\n                #winreg.SetValue(open_registry_key,f,1,v)\n            except WindowsError as ex:\n                print_error(\"Exception caught while updating existing registry subkey.\")\n                print_error(\"Key: \" + registry_key)\n                print_error(\"Name: \" + f)\n                print_error(\"Value: \" + v)\n                print_error(\"Exception message: \" + str(ex.message))\n                print_error(\"Exception filename: \" + str(ex.filename))\n                print_error(\"Windows error: \" + str(ex.winerror))\n\n                if resume_on_error is not True:\n                    raise ex\n            else:\n                values_updated += 1\n                values_modified_total += 1\n                print_message(\"Updated existing key for file: \" + f)\n\nprint_message(\"Registry values added: \" + str(values_added))\nprint_message(\"Registry values modified: \" + str(values_updated))\nprint_message(\"Registry value modifications skipped (Pre-existing): \" + str(values_skipped))\nprint_message(\"Total registry modifications performed: \" + str(values_modified_total))\n","sub_path":"bulk_add_to_application_compatability.py","file_name":"bulk_add_to_application_compatability.py","file_ext":"py","file_size_in_byte":9651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
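A quick sanity check for the script above is to read the values back out of the same key; a minimal sketch against the same `_winreg` API (Python 2; hive and key names as configured in the record):

import _winreg as winreg

key_path = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\AppCompatFlags\Layers'
with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as hive:
    with winreg.OpenKey(hive, key_path) as key:
        n_values = winreg.QueryInfoKey(key)[1]  # (subkeys, values, modified) -> value count
        for i in range(n_values):
            name, data, value_type = winreg.EnumValue(key, i)
            print('%s -> %s (type %d)' % (name, data, value_type))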
{"seq_id":"63615125","text":"class _No:\n    def __init__(self,anterior,proximo,item):\n        self.anterior = anterior\n        self.proximo = proximo\n        self.item = item\n\nclass iterador:\n    def __init__(self, lista):\n        self.atual = lista.primeiro\n    def __next__(self):\n        if self.atual.proximo is None:\n            raise StopIteration\n        else:\n            self.atual = self.atual.proximo\n            return self.atual.item\n\nclass Lista:\n    def __init__(self,*args):\n        self.primeiro = self.ultimo = _No(None,None,None)\n        self.tamanho = 0\n    \n    def __iter__(self):\n        return iterador(self)\n\n    def __len__(self):\n        return self.tamanho\n    \n    def __repr__(self):\n        saida = f'{self.__class__.__name__}('\n        content = ', '.join(x.__repr__() for x in self)\n        saida+= content + ')'\n        return saida\n\n    def __str__(self):\n        saida = f'('\n        content = ', '.join(x.__repr__() for x in self)\n        saida+= content + ')'\n        return saida\n    \n    def __getitem__(self, i):\n        '''\n        Look up an element by index -- item[index]\n        :param i: index of the item to look up\n        '''\n        atual = self.primeiro\n        cont = -1\n        while atual.proximo is not None and cont != i:\n            atual = atual.proximo\n            cont += 1\n        if cont == i:\n            return atual.item\n        else:\n            raise IndexError  # (the original returned the exception class instead of raising it)\n    \n    def __contains__(self, item):\n        '''\n        Return True or False depending on whether the element is in the list,\n        used via -- item in Lista --\n        (renamed from the misspelt __conteins__, which `in` never called)\n        '''\n        aux = self.primeiro\n        while aux.proximo is not None and aux.item != item:\n            aux = aux.proximo\n        return aux.item == item\n    \n    def anexar(self,item):\n        '''\n        Append an arbitrary item, passed as a parameter, to the end of the list\n        :param item: item to append to the list\n        '''\n        self.ultimo.proximo = _No(self.ultimo,None,item)\n        self.ultimo = self.ultimo.proximo\n        self.tamanho += 1\n    \n\n    def adicionar_index(self, i, item):\n        '''\n        Insert an arbitrary item at a specific position\n        :param i: position (index) at which to insert\n        :param item: item to insert\n        '''\n        cont = -1\n        posicao = self.primeiro\n        while cont != i and posicao.proximo is not None:  # (the original `or ... is None` could run past the end)\n            posicao = posicao.proximo\n            cont += 1\n        posicao.anterior = _No(posicao.anterior,posicao,item)\n        posicao.anterior.anterior.proximo = posicao.anterior\n        self.tamanho += 1\n\n    def remove_no_fim(self):\n        '''\n        Remove the last item of the list\n        '''\n        aux = self.ultimo\n        self.ultimo = self.ultimo.anterior\n        self.ultimo.proximo = None\n        aux.anterior = None\n        val = aux.item\n        aux.item = None\n        del(aux)\n        self.tamanho -= 1\n        return val","sub_path":"algorithm application/Projeto 2/listduplamenteencadeada.py","file_name":"listduplamenteencadeada.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
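A short usage sketch for the doubly linked Lista above (hypothetical driver code, not in the original file):

lista = Lista()
for n in (10, 20, 30):
    lista.anexar(n)           # append to the end

print(len(lista))             # 3
print(lista[1])               # 20
print(20 in lista)            # True, via __contains__
lista.adicionar_index(1, 15)  # insert 15 before index 1
print(list(lista))            # [10, 15, 20, 30]
print(lista.remove_no_fim())  # 30, removed from the end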
{"seq_id":"167978735","text":"import requests\nimport json\nimport csv\nfrom urllib.parse import urlencode\nimport datetime\nfrom datetime import timedelta\n\n# ZenDesk API Token and headers\ntoken = ''\nheaders = {'content-type': 'application/json', 'Authorization': 'Basic ' + token}\n#begin the CSV file we are writing to with headers\ncsvheaders = ['Tickets First 30 Days', 'Tickets Between 30 and 60 Days', 'Tickets Between 60 and 90 Days', 'Tickets After 90 days']\nwith open('ticketsbyday.csv', 'w') as f:\n    writer = csv.writer(f)\n    writer.writerows([csvheaders])\n#to assess duration, timedelta objects need to be defined\nthirty = timedelta(days=30)\nsixty = timedelta(days=60)\nninety = timedelta(days=90)\n#Lotta loops here, keep an eye out\n#this source .csv file contains the account name, ZenDesk ID and date of first sale for every account that has rung sales\nwith open('AccountNameAndZenDeskID.csv', 'r', encoding='utf-8-sig') as f:\n    reader = csv.reader(f)\n    print(reader)\n    for row in reader:\n        params = {\n            'query': 'type:ticket organization:' + row[0]\n        }\n        url = ['https://yourorghere.zendesk.com/api/v2/search.json?' + urlencode(params)]\n        #This first loop checks for no results and if so, returns an error and keeps going, if not it appends each page of the results to the url list (ZD paginates if there are over 100 returned responses in a search)\n        for i in url:\n            response = requests.get(i, headers=headers)\n            response_text = json.loads(response.text)\n            if not response_text.get('results'):\n                # (the original compared the response dict to [], which is never true)\n                with open('ticketsbyday.csv', 'a') as f:\n                    writer = csv.writer(f)\n                    writer.writerow([\"Error with \" + row[0]])  # writerow expects a sequence, not a bare string\n                continue\n            elif response_text['next_page']:\n                url.append(response_text['next_page'])\n                print(\"Another page!\")\n            else:\n                print(\"No more pages!\")\n                break\n        print('Tickets for ' + row[0])\n        tickets_30 = 0\n        tickets_60 = 0\n        tickets_90 = 0\n        tickets_after_90 = 0\n        #now we have a list of URLs, we iterate over every URL in the list, and query each one for every ticket contained within\n        for i in url:\n            response = requests.get(i, headers=headers)\n            response_text = json.loads(response.text)\n            for ticket in response_text['results']:\n                print(datetime.datetime.strptime(ticket['created_at'], '%Y-%m-%dT%H:%M:%SZ'))\n                print(datetime.datetime.strptime(row[2], '%m/%d/%Y %H:%M'))\n                #parse the ticket's created-at date and subtract the date of first sale from our source csv file to work out how many days after their first sale the ticket was created\n                ticket_age = datetime.datetime.strptime(ticket['created_at'], '%Y-%m-%dT%H:%M:%SZ') - datetime.datetime.strptime(row[2], '%m/%d/%Y %H:%M')\n                if ticket_age <= thirty:\n                    print(\"This ticket came in their first 30 days!\")\n                    tickets_30 += 1\n                elif ticket_age <= sixty:\n                    print(\"This ticket came in their first 60 days!\")\n                    tickets_60 += 1\n                elif ticket_age <= ninety:\n                    print(\"this ticket came in their first 90 days!\")\n                    tickets_90 += 1\n                else:\n                    print(\"This ticket is older than 90 days!\")\n                    tickets_after_90 += 1\n        #this just keeps track as the script runs\n        print(\"This store had \" + str(tickets_30) + \" tickets in their first 30 days\")\n        print(\"This store had \" + str(tickets_60) + \" tickets in their first 60 days\")\n        print(\"This store had \" + str(tickets_90) + \" tickets in their first 90 days\")\n        print(\"This store had \" + str(tickets_after_90) + \" tickets after their first 90 days\")\n        #write the numbers to a new .csv\n        number_of_tickets = [str(tickets_30), str(tickets_60), str(tickets_90), str(tickets_after_90)]\n        with open('ticketsbyday.csv', 'a') as f:\n            writer = csv.writer(f)\n            writer.writerows([number_of_tickets])\n\n        print(response_text['next_page'])\n","sub_path":"306090open.py","file_name":"306090open.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"393056098","text":"# -*- coding: utf-8 -*-\n\n# TODO: https://docs.sqlalchemy.org/en/latest/core/pooling.html - for multiple connection\n# http://www.jeremyaldrich.net/en/latest/multiprocessing_sqlalchemy_largefile_processing.html\n\nfrom bogoslovskiy.model.db import AbstractDbWorker\n\nimport sqlalchemy\nimport pandas as pd\n\nimport hashlib\nimport logging\nimport _pickle as cPickle\n\n\nDEFAULT_CACHE_PARAMS: dict = {\n\t\"host\": \"localhost\",\n\t\"ttl\": 60*60*5  # 5 hours\n}\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass InHouseDbWorker(AbstractDbWorker):\n\t\"\"\"Class provides API to work with a database.\n\tIt can return either pandas DataFrame object or SQLAlchemy cursor object.\n\tAll it needs is a ConfigWorker object to work with a database (and a database name).\n\n\tConfiguration file 
example:\n\t\t[database]\\n\n\t\tdriver = mysql+mysqlconnector\\n\n\t\tuser = user1\\n\n\t\tpassword = pass1\\n\n\t\thost = host1\\n\n\t\tport = 3306\\n\n\t\tdatabase = db1\\n\n\t\"\"\"\n\n\tdef _get_connection_string(self) -> str:\n\t\t\"\"\"Method returns a connection string for a database. All parameters are taken from a configuration file.\n\n\t\tReturns:\n\t\t\tstr: connection string of a form `driver://user:password@host:port/database`\n\n\t\t\"\"\"\n\t\treturn '{0}://{1}:{2}@{3}:{4}/{5}'.format(\n\t\t\tself.config.get_key_from_section_of_config(self.db_name, \"driver\"),\n\t\t\tself.config.get_key_from_section_of_config(self.db_name, \"user\"),\n\t\t\tself.config.get_key_from_section_of_config(self.db_name, \"password\"),\n\t\t\tself.config.get_key_from_section_of_config(self.db_name, \"host\"),\n\t\t\tself.config.get_key_from_section_of_config(self.db_name, \"port\"),\n\t\t\tself.config.get_key_from_section_of_config(self.db_name, \"database\")\n\t\t)\n\n\tdef _get_engine(self) -> sqlalchemy.engine.Engine:\n\t\t\"\"\"Method creates an engine (interface) to a work with a database. It does NOT create a connection.\n\n\t\tReturns:\n\t\t\tsqlalchemy.engine.Engine: object to work with a database\n\n\t\tRaises:\n\t\t\tAssertionError: error indicates that either `self.config` or `self.db_name` is empty\n\n\t\t\"\"\"\n\t\tlogger.debug(\"Getting engine\")\n\n\t\t# we need to guarantee that we get a config and a database name (for config section)\n\t\tassert self.config, logger.error(\"InHouseDbWorker must have a config!!!\")\n\t\tassert self.db_name, logger.error(\"InHouseDbWorker must have a database name!!!\")\n\n\t\tconnection_string: str = self._get_connection_string()\n\n\t\treturn sqlalchemy.create_engine(connection_string)\n\n\tdef _cache_df(self, query: str, conn: sqlalchemy.engine.Connection, ttl: int = 60*60*5, *args) -> pd.DataFrame:\n\t\tquery_hash: str = hashlib.sha224(query.encode('utf-8')).hexdigest()\n\t\tkey: str = \"sql_cache:\" + query_hash\n\t\tlogger.debug(\n\t\t\t\"Created Key\\t : {}\".format(key)\n\t\t)\n\n\t\tif not self.cache_service.get(key):\n\t\t\tdata: pd.DataFrame = pd.read_sql(query, conn, *args)\n\t\t\tserialized_data: bytes = cPickle.dumps(data)\n\n\t\t\t# set data to redis\n\t\t\tself.cache_service.put(key, serialized_data, ttl)\n\n\t\t\tlogger.debug(\"Setting data to Redis\")\n\n\t\tserial: bytes = self.cache_service.get(key)\n\n\t\tdata: pd.DataFrame = cPickle.loads(serial)\n\n\t\treturn data\n\n\tdef get_dataframe(self, query: str, *args) -> pd.DataFrame:\n\t\t\"\"\"Queries a database a returning a DataFrame. If `self.use_cache = True`, it tries to cache a query result.\n\n\t\tArgs:\n\t\t\tquery (str): query string\n\t\t\t*args: arguments for pandas `read_sql`. See documentation here: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql.html\n\n\t\tReturns:\n\t\t\tpd.DataFrame: results of querying in a form of DataFrame\n\n\t\t\"\"\"\n\t\twith self.engine.connect() as conn:\n\t\t\tif self.use_cache:\n\t\t\t\ttry:\n\t\t\t\t\treturn self._cache_df(query, conn)\n\t\t\t\texcept ConnectionError:\n\t\t\t\t\tlogger.warning(\"No connection to the caching service. Quering...\")\n\t\t\t\t\tself.use_cache = False\n\t\t\t\t\treturn pd.read_sql(query, conn, *args)\n\t\t\telse:\n\t\t\t\treturn pd.read_sql(query, conn, *args)\n\n\tdef get_iterable(self, query: str, *args) -> sqlalchemy.engine.result.ResultProxy:\n\t\t\"\"\"\n\n\t\tArgs:\n\t\t\tquery (str): query string\n\t\t\t*args: arguments for SQLAlchemy `execute`. 
See documentation here: https://docs.sqlalchemy.org/en/latest/core/connections.html#sqlalchemy.engine.Connection.execute\n\n\t\tReturns:\n\t\t\tsqlalchemy.engine.result.ResultProxy: iterable with query results\n\n\t\t\"\"\"\n\t\twith self.engine.connect() as conn:\n\t\t\treturn conn.execute(query, *args)\n","sub_path":"bogoslovskiy/model/db/Implementation/InHouseDbWorker.py","file_name":"InHouseDbWorker.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"268981867","text":"#!/usr/bin/python3\n\nimport sys\nimport logging\nfrom os import path, remove\n\nclass Log:\n def __init__(self,\n log_file_name: str = 'large_index.log'\n ):\n super().__init__()\n self.log_file_name = log_file_name\n self.logger = logging.getLogger(__name__)\n self.log_file_format = logging.Formatter('[%(asctime)s] %(levelname)s - %(message)s')\n\n def get_file_handler(self):\n self.file_handler = logging.FileHandler(self.log_file_name)\n self.file_handler.setFormatter(self.log_file_format)\n\n def get_stream_handler(self):\n self.stream_handler = logging.StreamHandler()\n self.stream_handler.setFormatter(self.log_file_format)\n\n def get_logger(self):\n self.logger.setLevel(logging.INFO)\n self.logger.addHandler(self.file_handler)\n self.logger.addHandler(self.stream_handler)\n\n def remove_old_log_file(self):\n if path.isfile(self.log_file_name):\n remove(self.log_file_name)\n\nif __name__ == \"__main__\":\n class_log = Log()\n class_log.remove_old_log_file()\n class_log.get_file_handler()\n class_log.get_stream_handler()\n class_log.get_logger()\n","sub_path":"large_index/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"305441951","text":"from rest_framework import serializers\n\nfrom reviews.models import Comment, Review\n\nfrom titles.models import Category, Genre, Title\n\nfrom users.models import User\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(slug_field='username',\n read_only=True)\n title = serializers.SlugRelatedField(slug_field='pk', read_only=True)\n\n class Meta:\n fields = ('id', 'text', 'author', 'score', 'pub_date', 'title')\n model = Review\n\n def validate(self, data):\n author = self.context['request'].user\n title_id = self.context.get('title_id')\n if (Review.objects.filter(author=author, title=title_id).exists()\n and self.context['request'].method != 'PATCH'):\n raise serializers.ValidationError('Вы уже оставили отзыв')\n return data\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(slug_field='username',\n read_only=True,)\n\n class Meta:\n fields = ('id', 'text', 'author', 'pub_date')\n model = Comment\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = ('id',\n 'username',\n 'role',\n 'email',\n 'first_name',\n 'last_name',\n 'bio',)\n\n\nclass UserEmailSerializer(serializers.Serializer):\n email = serializers.EmailField(required=True)\n\n def validate(self, data):\n if User.objects.filter(email=data['email']).exists():\n raise serializers.ValidationError('Пользователь с таким email уже '\n 'зарегестрирован в системе')\n return data\n\n\nclass UserLoginSerializer(serializers.Serializer):\n email = serializers.EmailField(required=True)\n secret = serializers.CharField(required=True)\n\n def validate(self, data):\n email = 
data['email']\n secret = data['secret']\n if not User.objects.filter(email=email,\n secret=secret).exists():\n raise serializers.ValidationError('Вы отправили неверный код')\n return data\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n class Meta:\n fields = ('name', 'slug')\n model = Genre\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n fields = ('name', 'slug')\n model = Category\n\n\nclass TitleSerializer(serializers.ModelSerializer):\n category = CategorySerializer(read_only=True)\n genre = GenreSerializer(read_only=True, many=True)\n\n class Meta:\n fields = ('id',\n 'name',\n 'category',\n 'genre',\n 'year',\n 'description',\n 'rating',)\n model = Title\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"53613160","text":"from discord.ext import commands\nfrom scrape import Scrape\nimport aiomysql\nimport discord\nimport config\nimport util\n\nbot = commands.Bot(command_prefix = commands.when_mentioned_or(*config.prefixes),\n description = config.description,\n pm_help = True)\n\nclient = discord.Client()\n\n@bot.event\nasync def on_ready():\n print(\"Logged in!\")\n print(\"Username: \" + bot.user.name)\n print(\"User ID: \" + bot.user.id)\n await session.connect()\n\n@bot.event\nasync def on_message(message):\n if message.author == bot.user:\n return\n await session.check_message(message)\n await bot.process_commands(message)\n\nif __name__ == \"__main__\":\n for extension in config.initial_extensions:\n try:\n bot.load_extension(extension)\n except (AttributeError, ImportError) as oops:\n print(\"Failed to load extension!\")\n print(\"{}: {}\".format(type(oops), str(oops)))\n session = Scrape(bot.loop)\n util.make_dir()\n\nbot.run(config.token)\n","sub_path":"zahando/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"279451210","text":"# Эта программа делит одно число на другое.\r\n# Тема: --- ИСКЛЮЧЕНИЯ ---\r\n\r\n\r\ndef main():\r\n # Получить два числа.\r\n num1 = int(input('Bвeдитe число: '))\r\n num2 = int(input('Bвeдитe еще одно число: '))\r\n\r\n # Разделить num1 на num2 и показать результат.\r\n result = num2 / num2\r\n print(num1, 'деленное на', num2, 'равняется', result)\r\n\r\n\r\n# Вызвать главную функцию.\r\nmain()\r\n","sub_path":"Chapter 6 (Files)/(6.20) division.py","file_name":"(6.20) division.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"51289316","text":"__author__ = 'ChrisPOConnell'\n'''\nAssignment 4\ntest1.py\nThis file is not used as part of the running code, it's just for testing.\n\nIntent: This is an exact cut and paste from StackOverflow used to generate\n one pass and one fail for a unittest. I used to his to see if a green \n pass bar would be generated in either PyCharm or PyDev. 
In neither\n environment does a green bar appear.\n'''\n\nfrom unittest.case import TestCase\nimport unittest\nfrom io import StringIO\nclass MyTestCase(TestCase):\n def testTrue(self):\n '''\n Always true\n '''\n assert True\n\n def testFail(self):\n '''\n Always fails\n '''\n assert False\n\nfrom pprint import pprint\nstream = StringIO()\nrunner = unittest.TextTestRunner(stream=stream)\nresult = runner.run(unittest.makeSuite(MyTestCase))\nprint('Tests run ' + str(result.testsRun))\nprint('Errors ' + str(result.errors))\npprint(result.failures)\nstream.seek(0)\nprint('Test output\\n'+ stream.read())","sub_path":"Assignment4/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"516478165","text":"import numpy as np\n\n\nclass sudoku:\n\n domain = set(range(1,10))\n \n def __init__(self):\n self.sudoku = np.zeros((9,9), dtype=int)\n\n def add_symbol(self,i,j,sym):\n if not sym in self.domain:\n raise InvalidSymbolError()\n self.sudoku[i,j] = sym\n\n def get_symbol(self,i,j):\n return self.sudoku[i,j] \n\n def __str__(self):\n\n sep = '+-------+-------+-------+\\n'\n out = ''\n for i in range(0,9):\n\n if i%3 == 0:\n out = out + sep\n \n for j in range(0,9):\n if j%3 == 0:\n out = out + '| '\n\n if int(self.get_symbol(i,j)) == 0:\n sym = ' '\n else:\n sym = str(self.get_symbol(i,j))\n out = out + sym + ' ' \n\n out = out + '|\\n'\n\n out = out + sep\n\n return out\n\nclass InvalidSymbolError(Exception):\n pass\n\nif __name__ == '__main__':\n\n test = sudoku()\n\n test.add_symbol(0,7,9)\n test.add_symbol(6,3,7)\n test.add_symbol(3,3,2)\n test.add_symbol(1,8,1)\n\n print(test)","sub_path":"src/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"163954792","text":"\"\"\"\nA federated learning client using SCAFFOLD.\n\nReference:\n\nKarimireddy et al., \"SCAFFOLD: Stochastic Controlled Averaging for Federated Learning,\"\nin Proceedings of the 37th International Conference on Machine Learning (ICML), 2020.\n\nhttps://arxiv.org/pdf/1910.06378.pdf\n\"\"\"\nimport torch\nfrom torch import optim\n\nimport scaffold_optimizer\n\n\nclass ScaffoldOptimizer(optim.SGD):\n \"\"\"A customized optimizer for SCAFFOLD.\"\"\"\n def __init__(self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False):\n super().__init__(params, lr, momentum, dampening, weight_decay,\n nesterov)\n self.new_client_update_direction = None\n self.server_update_direction = None\n self.client_update_direction = None\n self.client_id = None\n\n self.update_flag = True\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n if self.update_flag is True:\n self.new_client_update_direction = []\n\n # Initialize server update direction and client update direction\n if self.server_update_direction is None:\n self.client_update_direction = [0] * len(group['params'])\n self.server_update_direction = [0] * len(group['params'])\n\n for p, client_update_direction, server_update_direction in zip(\n 
group['params'], self.client_update_direction,\n                    self.server_update_direction):\n                if p.grad is None:\n                    continue\n                d_p = p.grad.data\n                param_state = self.state[p]\n\n                if weight_decay != 0:\n                    d_p.add_(p.data, alpha=weight_decay)\n\n                if momentum != 0:\n                    if 'momentum_buffer' not in param_state:\n                        buf = param_state['momentum_buffer'] = torch.clone(\n                            d_p).detach()\n                    else:\n                        buf = param_state['momentum_buffer']\n                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n                    if nesterov:\n                        # use the non-deprecated signature: d_p + momentum * buf\n                        d_p = d_p.add(buf, alpha=momentum)\n                    else:\n                        d_p = buf\n\n                # Apply variance reduction\n                d_p.add_(server_update_direction)\n                d_p.sub_(client_update_direction)\n\n                # Update weight\n                p.data.add_(d_p, alpha=-group['lr'])\n\n                # Obtain the latest client update direction\n                if self.update_flag is True:\n                    self.new_client_update_direction.append(d_p)\n\n            if self.update_flag is True:\n                fn = f\"new_client_update_direction_{self.client_id}.pth\"\n                torch.save(self.new_client_update_direction, fn)\n                self.update_flag = False\n\n        return loss\n","sub_path":"examples/scaffold/scaffold_optimizer.py","file_name":"scaffold_optimizer.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"453777828","text":"\"\"\"\nConcurrency test with futures.\n\nSeveral requests are sent to the OS in parallel\nand the results are processed as they come back.\n\nAuthor: Dalker (daniel.kessler@dalker.org)\nDate: 2021.03.20\n\"\"\"\n\nimport bisect\nimport sys\nimport time\nimport concurrent.futures\n\n\nWORDS = (\"clause\", \"concurrent\", \"expression\", \"future\", \"grammar\", \"language\",\n         \"list\", \"semantics\", \"sentence\", \"syntax\", \"type\", \"word\")\n\n\ndef local_define(word):\n    \"\"\"Find the most common definition of a word in a file.\"\"\"\n    with open(f\"defs/{word}.txt\") as thefile:\n        definition = thefile.readline()\n    time.sleep(0.001)\n    return (word, definition)\n\n\ndef sequential_defs():\n    \"\"\"Request definitions sequentially, in order.\"\"\"\n    defs = []\n    for word in WORDS:\n        defs.append(local_define(word))\n    return defs\n\n\ndef concurrent_defs_endsort():\n    \"\"\"\n    Request definitions concurrently.\n\n    In this variant, results are put back in order at the end.\n    \"\"\"\n    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n        define_calls = (executor.submit(local_define, word) for word in WORDS)\n        defs = []\n        for future in concurrent.futures.as_completed(define_calls):\n            defs.append(future.result())\n    return sorted(defs)\n\n\ndef concurrent_defs_insert():\n    \"\"\"\n    Request definitions concurrently.\n\n    In this variant, results are kept sorted as they arrive, by insertion.\n    \"\"\"\n    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n        define_calls = (executor.submit(local_define, word) for word in WORDS)\n        resultats = []\n        for future in concurrent.futures.as_completed(define_calls):\n            res = future.result()\n            bisect.insort(resultats, res)\n    return resultats\n\n\ndef time_this(test, name=None):\n    \"\"\"Test the speed of a function.\"\"\"\n    start = time.time()\n    res = test()\n    elapsed = time.time() - start\n    if name is not None:\n        print(\"-=\", name, \"done in\", elapsed, \"s =-\")\n        for w, d in res:\n            print(w, \":\", d)\n        print()\n    return elapsed\n\n\ndef comparative_test(n_tests, m_tests=1):\n    \"\"\"Compare the performance of sequential vs. concurrent calls.\"\"\"\n    if m_tests > 1:\n        print(\"* {} series of {} alternating tests of each, without output *:\"\n              .format(n_tests, m_tests))\n    else:\n        print(f\"* {n_tests} alternating tests without output *:\")\n    print(\"   seq.  cc.end  cc.ins\")\n    gtot1 = gtot2 = gtot3 = 0\n    for n in range(n_tests):\n        tot1 = tot2 = tot3 = 0\n        for _ in range(m_tests):\n            tot1 += time_this(sequential_defs)*1000\n            tot2 += time_this(concurrent_defs_endsort)*1000\n            tot3 += time_this(concurrent_defs_insert)*1000\n        gtot1 += tot1\n        gtot2 += tot2\n        gtot3 += tot3\n        print(\"  Time {:2d}: {:.4f} {:.4f} {:.4f}\".format(n+1,\n                                                          tot1/m_tests,\n                                                          tot2/m_tests,\n                                                          tot3/m_tests))\n    print(\"Mean times: {:.4f} {:.4f} {:.4f}\".format(gtot1/(n_tests*m_tests),\n                                                    gtot2/(n_tests*m_tests),\n                                                    gtot3/(n_tests*m_tests)))\n\n\ndef print_usage():\n    \"\"\"Display the program's command-line options.\"\"\"\n    print(sys.argv[0], \":\",\n          \"request definitions from a local dictionary\")\n    print(\"OPTIONS\")\n    print(\"  seq     : sequential method\")\n    print(\"  end     : concurrent method, sorting once all threads are done\")\n    print(\"  ins     : concurrent method, sorted insertion as results arrive\")\n    print(\"  <N>     : run N comparative timing tests of the three methods\")\n    print(\"  <N> <M> : run N series of M tests\")\n\n\nif __name__ == \"__main__\":\n    try:\n        choice = sys.argv[1]\n    except IndexError:\n        print_usage()\n        exit()\n    if choice == 'seq':\n        time_this(sequential_defs, \"Sequential method\")\n    elif choice == 'end':\n        time_this(concurrent_defs_endsort,\n                  \"Concurrent method with final sort\")\n    elif choice == 'ins':\n        time_this(concurrent_defs_insert, \"Concurrent method with insertion\")\n    else:\n        try:\n            n = int(choice)\n        except ValueError:\n            print_usage()\n        else:\n            comparative_test(n)\n","sub_path":"implementation/Python/file_futures_sleep.py","file_name":"file_futures_sleep.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"35660832","text":"import csv\nimport unittest\nimport os\nfrom os.path import isfile, join\nimport sys\nimport pandas as pd\nimport numpy as np\n\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\n\nfrom main import MisGAN\n\nclass Args():\n    def __init__(self):\n        self.input = \"..\\\\data\\\\wdbc.csv\"\n        self.fname = \"data/wdbc.csv\"\n        self.ims = True\n        self.preprocess = True\n        self.evaluate = True\n        self.split = False\n        self.model = \"wdbc.csv_train\"\n\n\nclass TestImputationMethods(unittest.TestCase):\n    def setUp(self):\n        self.args = Args()\n        self.imputation_method = MisGAN(self.args) # The implementation of your Imputation method\n        self.input_file = self.imputation_method.args.input\n        self.verificationErrors = [] # append exceptions for try-except errors\n\n    def test_input_file_format(self):\n        # test if input file agrees with expected format\n        with open(self.input_file, \"r\") as fin:\n            lines = csv.reader(fin)\n            total_lines = 0\n            for line in lines:\n                total_lines += 1\n\n    def test_impute(self):\n        # Test whether the final imputed data have the same shape with input data\n        with open(self.input_file, \"r\") as fin:\n            lines = csv.reader(fin)\n            total_input_lines = 0\n            for l in lines:\n                input_headers = len(l)\n                total_input_lines += 1\n\n        preprocess_result = self.imputation_method.preprocess()\n\n        if isinstance(preprocess_result, list):\n            for res in preprocess_result:\n                if isinstance(res, DataLoader):\n                    s = res.dataset.original_data.shape\n                    self.assertEquals(s[0], 
total_input_lines)\n self.assertEquals(s[1], input_headers)\n\n\n def test_evaluate(self, *args, **kwargs):\n self.assertIsInstance(self.imputation_method.evaluate(*args, **kwargs), float)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"data_cleaning/imputation/misgan/sample_test.py","file_name":"sample_test.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"330657088","text":"import unittest\nimport os\nfrom sqlebra.sqlite import SQLiteDB as DB\nfrom sqlebra.dtype import str_ as SQLstr\nfrom sqlebra import exceptions as ex\n\nFILE = 'unittest.sqlebra.db'\n\n\nclass TestInit(unittest.TestCase):\n\n value = 'test'\n\n @classmethod\n def setUpClass(cls):\n cls.dbfile = DB(FILE, mode='w').open()\n\n def test_1_set(self):\n self.dbfile['A'] = self.value\n self.assertEqual([(0, 'A', 'str', None, None, None, None, None, self.value, None, 1, 1)],\n self.dbfile.select(where={'id': 0}))\n\n def test_2_get(self):\n self.assertIsInstance(self.dbfile['A'], SQLstr)\n\n def test_3_py(self):\n self.assertEqual(self.value, self.dbfile['A'].py)\n\n def test_4_edit(self):\n self.dbfile['A'].py = 'edit'\n self.assertEqual('edit', self.dbfile['A'].py)\n\n def test_5_delete(self):\n self.dbfile['A'].delete()\n with self.assertRaises(ex.VariableError):\n self.dbfile['A']\n\n @classmethod\n def tearDownClass(cls):\n cls.dbfile.disconnect()\n os.remove(FILE)\n\n\nif __name__ == '__main__':\n try:\n unittest.main()\n except Exception as e:\n if os.path.exists(FILE):\n os.remove(FILE)\n raise e\n","sub_path":"test_sqlebra/test_dtype/test_str.py","file_name":"test_str.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"642415512","text":"#!flask/bin/python\nfrom flask import Flask, jsonify, abort, request\nimport utils\n\napp = Flask(__name__)\n\n@app.route('/api/playlist', methods=['GET'])\ndef getPlaylist():\n playlist = request.args.get('playlist')\n if playlist == None:\n playlist = 'demo'\n ret = []\n for x in utils.getSongsByScore(playlist):\n d = {}\n d['id'] = x[0]\n d['score'] = x[1]\n d['title'], d['thumbnail'] = utils.getPreview(x[0])\n ret.append(d)\n return jsonify(ret)\n\n@app.route('/api/add', methods=['GET'])\ndef addSongToPlaylist():\n playlistName = request.args.get('playlistname')\n if playlistName == None:\n playlistName = 'demo'\n songUrl = request.args.get('songUrl')\n # print playlistName, songUrl\n utils.createNewSong(playlistName, songUrl)\n return jsonify('added ' + songUrl + ' to ' + playlistName)\n\n@app.route('/api/delete', methods=['GET'])\ndef deleteSongFromPlaylist():\n playlistName = request.args.get('playlistname')\n if playlistName == None:\n playlistName = 'demo'\n songUrl = request.args.get('songUrl')\n utils.deleteSongFromPlaylist(playlistName, songUrl)\n return jsonify('deleted ' + songUrl + ' from ' + playlistName)\n\n@app.route('/api/update', methods=['GET'])\ndef updateScore():\n playlistName = request.args.get('playlistname')\n if playlistName == None:\n playlistName = 'demo'\n songUrl = request.args.get('songUrl')\n diff = int(request.args.get('diff'))\n utils.recreateSong(playlistName, songUrl, diff)\n return jsonify('updated score of ' + songUrl + ' in ' + playlistName + ' by ' + str(diff))\n\nif __name__ == '__main__':\n 
app.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"164265910","text":"\"\"\"Progs to say the type of instrument.\n\nIMPORTANT NOTE: This version of inst_type.py is different from the other \nversions (e.g. those used in the classification of XBTs for correction). This \ncode has been changed slightly so that the output can be used as the output\nfrom is_xbt.pro is used when filtering out low quality XBTs in the IDL OHC code.\nThe changes are that probeCode is now output and that 0 or 99 or 999 are treated\nas missing probeCode types instead of just 0 or 99. The restrictions for OHC are\nyou shouldn't use the data if probeCode is 0 or 999 AND the year is greater than\nor equal to 1995 or the maxDepth is greater than 900.\n\n\"\"\"\n\nfrom netCDF4 import Dataset\nimport netCDF4\nimport numpy as np\n# Have removed the call to import sag_utilities as sag as it didn't seem to be \n# used.\n\ndef is_xbt(projectName,\n instRef,\n salinities,\n salinityFV,\n depths,\n depthFV):\n\n \"\"\"Say if something is an XBT and the type.\"\"\"\n maxDepth = -99\n WODCountryCode = '-99'\n WODCorrectionCode = -99\n validDepths = depths != depthFV\n if np.count_nonzero(validDepths) > 0:\n maxDepth = np.max(depths[validDepths])\n\n # Definition of types.\n codes = [[2 , -1 , 0 , 0], # Unknown\n [-1 , 101, 1 , 0], # Unknown\n [-1 , 102, 2 , 0], # Unknown\n [201, -1 , 70 , 0], # T7 Brand unknown \n [202, -1 , 40 , 0], # T4 Brand unknown \n [203, -1 , 60 , 0], # T6 Brand unknown \n [204, -1 , 50 , 0], # T5 Brand unknown \n [205, -1 , 100, 0], # T10 Brand unknown \n [206, -1 , 110, 0], # T11 Brand unknown \n [207, 41, 71 , 1], # T7 Sippican \n [207, 42, 72 , 1], # T7 Sippican \n [208, 1, 41 , 1], # T4 Sippican \n [208, 2, 42 , 1], # T4 Sippican \n [209, 31, 60 , 1], # T6 Sippican \n [209, 32, 60 , 1], # T6 Sippican \n [210, 11, 50 , 1], # T5 Sippican \n [211, 61, 100, 1], # T10 Sippican \n [212, 71, 110, 1], # T11 Sippican \n [-1 , 900, 120, 1], # T12 Sippican \n [213, 21, 130, 1], # Fast Deep Sippican \n [214, 51, 141, 1], # Deep Blue Sippican \n [214, 52, 142, 1], # Deep Blue Sippican \n [-1 , 81, 150, 1], # AXBT Sippican \n [215, 201, 41 , 2], # T4 TSK \n [215, 202, 42 , 2], # T4 TSK \n [216, 211, 60 , 2], # T6 TSK \n [216, 212, 60 , 2], # T6 TSK \n [217, 221, 71 , 2], # T7 TSK \n [217, 222, 72 , 2], # T7 TSK \n [218, -1, 0 , 0], # MDI; Academy of Sc \n [219, 231, 50 , 2], # T5 TSK \n [220, 241, 100, 2], # T10 TSK \n [221, 401, 10 , 3], # XBT-1 Sparton \n [222, 411, 30 , 3], # XBT-3 Sparton \n [223, 421, 40 , 3], # XBT-4 Sparton \n [224, 431, 50 , 3], # XBT-5 Sparton \n [225, 441, 51, 3], # XBT-5DB Sparton \n [226, 451, 60 , 3], # XBT-6 Sparton \n [227, 461, 70 , 3], # XBT-7 Sparton \n [-1 , 462, 71 , 3], # XBT-7 Sparton \n [228, 471, 72 , 3], # XBT-7DB Sparton \n [229, 481, 100, 3], # XBT-10 Sparton \n [230, 491, 200, 3], # XBT-20 Sparton \n [231, 501, 201, 3], # XBT-20DB Sparton \n [232, 251, 140, 2], # Deep Blue TSK \n [232, 252, 140, 2], # Deep Blue TSK \n [233, 261, 150, 2], # AXBT TSK \n [234, -1 , 150, 0], # AXBT Unknown \n [235, -1 , 140, 0], # Deep Blue Unknown \n [236, -1 , 130, 0], # Fast Deep Unknown \n [237, -1 , 160, 0], # SSXBT Sippican \n [238, -1 , 150, 0]] # AXBT Sparton \n codes = np.array(codes)\n\n notXBT = [np.array([-1, -1])]\n unkXBT = [np.array([0, 0])] \n\n try:\n probeCode = int(instRef[60:63])\n except ValueError:\n probeCode = 
''\n\n if np.any(salinities != salinityFV):\n return notXBT + [maxDepth, WODCountryCode, WODCorrectionCode, probeCode]\n \n if projectName[0:3] == 'WOD':\n # Set the column in the table to look at.\n codeIndex = 0\n\n # Check the instrument type code and return if can go no further.\n instCode = instRef[0:60]\n instCode = instCode.lstrip()\n instCode = instCode.rstrip()\n\n # Get some other information to return to user.\n WODCorrectionCode = int(instRef[63])\n WODCountryCode = projectName[8:10]\n\n # Extract the number for the probe type.\n probeCode = int(instRef[60:63])\n\n if instCode == '':\n if projectName[5:8] == 'XBT':\n return unkXBT + [maxDepth, WODCountryCode, WODCorrectionCode, probeCode]\n else:\n return notXBT + [maxDepth, WODCountryCode, WODCorrectionCode, probeCode]\n if instCode != '2':\n return notXBT + [maxDepth, WODCountryCode, WODCorrectionCode, probeCode]\n\n elif projectName[0:5] == 'GTSPP':\n # Set the column in the table to look at.\n codeIndex = 1\n\n # Get the number for the probe type.\n probeCode = int(instRef)\n\n # Check for situations where can go no further.\n if probeCode == 0 or probeCode == 99 or probeCode == 999:\n if projectName[5:7] == 'XB':\n return unkXBT + [maxDepth, WODCountryCode, WODCorrectionCode, probeCode]\n else:\n return notXBT + [maxDepth, WODCountryCode, WODCorrectionCode, probeCode]\n else: \n return notXBT + [maxDepth, WODCountryCode, WODCorrectionCode, probeCode]\n\n matches = codes[:, codeIndex] == probeCode\n nMatches = np.count_nonzero(matches)\n if nMatches == 0:\n return notXBT + [maxDepth, WODCountryCode, WODCorrectionCode, probeCode]\n\n iMatches = np.argwhere(matches)\n iMatches = iMatches[0] # Repeated codes for WOD data.\n result = np.reshape(codes[iMatches, 2:4], 2)\n\n # Finally, follow prescription in EN3 processing to\n # pick out mislabelled T5s. These will have 5000 added\n # to their code. 840 m max depth is used as depth \n # criterion as this is as used in the EN3 processing and\n # is also max depth of T7s (Tim Boyer; personal communication).\n # Deep Blues can now go to 920 m (Tim Boyer; personal \n # communication) and so these are not tested here.\n typeCode = result[0] // 10\n notSparton = result[1] < 3\n if notSparton:\n if (typeCode == 4 or # T4 max depth = 460m\n typeCode == 6 or # T6 max depth = 460m\n typeCode == 7 or # T7 max depth = 760m\n typeCode == 10 or # T10 max depth = 200m\n typeCode == 11): # T11 max depth = 460m\n if maxDepth > 840.0:\n result[0] += 5000\n\n return [result] + [maxDepth, WODCountryCode, WODCorrectionCode, probeCode]\n\ndef is_mbt(projectName,\n instRef):\n \"\"\"Identify MBTs.\"\"\"\n notMBT = np.array([-1, -1])\n unkMBT = np.array([0, 0])\n\n codes = [[ 1, 800, 00, 00], # MBT type/make unknown.\n [101, -1, 1, 1]] # GM39 (Russia). 
\n    codes = np.array(codes) \n\n    if projectName[0:3] == 'WOD':\n        codeIndex = 0\n        instCode = instRef[0:60]\n        instCode = instCode.lstrip()\n        instCode = instCode.rstrip()\n        if instCode == '':\n            if projectName[5:8] == 'MBT':\n                return unkMBT\n            else:\n                return notMBT\n        elif instCode != '1':\n            return notMBT\n        probeCode = int(instRef[60:63]) \n    elif projectName[0:5] == 'GTSPP':\n        codeIndex = 1\n        probeCode = int(instRef)\n        if probeCode == 0 or probeCode == 999:\n            if projectName[5:7] == 'MB':\n                return unkMBT\n            else:\n                return notMBT\n    else:\n        return notMBT \n    \n    matches = codes[:, codeIndex] == probeCode\n    nMatches = np.count_nonzero(matches)\n    if nMatches == 0:\n        return notMBT\n\n    iMatches = np.argwhere(matches)\n    iMatches = iMatches[0] \n    result = np.reshape(codes[iMatches, 2:4], 2)\n\n    return result\n\ndef test():\n    file = '/data/local/hadgs/Data/EN3_v2a_NoCWT/Profiles/EN3_v2a_NoCWT_Profiles_199501.nc'\n    ncid = Dataset(file)\n    pn = netCDF4.chartostring(ncid.variables['PROJECT_NAME'][:])\n    ir = netCDF4.chartostring(ncid.variables['INST_REFERENCE'][:])\n    ps = ncid.variables['PSAL_CORRECTED'][:]\n    psfv = ncid.variables['PSAL_CORRECTED']._fillvalue\n    de = ncid.variables['DEPH_CORRECTED'][:]\n    defv = ncid.variables['DEPH_CORRECTED']._fillvalue\n    ncid.close()\n\n    f = open('test.txt', 'w')\n    WODCountryCode = -1\n    for i in np.arange(pn.size):\n        vals = is_xbt(pn[i], ir[i], ps[i, :], \n                      psfv, de[i, :], defv)\n        f.write('%i %i %f %s %i\\n' % (vals[0][0], vals[0][1], vals[1], vals[2], vals[3]))\n        vals = is_mbt(pn[i], ir[i])\n        f.write('%i %i\\n' % (vals[0], vals[1]))\n\n    return\n","sub_path":"simplegrid/inst_type.py","file_name":"inst_type.py","file_ext":"py","file_size_in_byte":9849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"69823891","text":"import pygal  # NB: saving this script itself as pygal.py will shadow the real pygal package\r\n\r\n#reading labels and values from external databases\r\n'''\r\nfile = open('pygal.py','r')\r\n\r\nfor line in file.read().splitlines() :\r\n    if line:\r\n        label,value = line.split(',')\r\n        print(label,value)\r\nfile.close()  \r\n'''    \r\n\r\npiechart = pygal.Pie()\r\npiechart.title = \"Placements\"\r\npiechart.add('Adobe',2)\r\npiechart.add('Microsoft',3)\r\npiechart.add('Google',1)\r\npiechart.add('TexasIn.',6)\r\n# render() only returns the SVG string; render_to_file actually saves the chart\r\npiechart.render_to_file('placements.svg')\r\n\r\nbar = pygal.Bar()\r\nbar.title = \"Champions probability\"\r\nbar.add('Real',3)\r\nbar.add('Bayern',5)\r\nbar.add('Barca',2)\r\nbar.add('City',4)\r\nbar.render_to_file('champions.svg')\r\n","sub_path":"pygal.py","file_name":"pygal.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"260856934","text":"import requests\r\n#study more on requests\r\nr=requests.get(\"https://financialmodelingprep.com/api/company/prince/AApl\")\r\nprint(r.text)#returns text in the website#need 
connectivity\r\n#learn statuscode\r\nprint(r.status_code)\r\n#to post:\r\nurlex=\"www.something.com\"\r\ndataex={\"val1\":23,\r\n \"val2\":3,\r\n \"val3\":6\r\n}\r\nr2=requests.post(url=urlex,data=dataex)","sub_path":"requestsmodule.py","file_name":"requestsmodule.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"545152154","text":"import sys\nimport time\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import Lasso\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn import datasets\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import linear_model\nfrom functools import reduce\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.datasets import make_moons\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom scipy.stats import skew\nimport seaborn as sns\nfrom scipy import stats\nfrom scipy.stats import norm\nfrom sklearn.preprocessing import StandardScaler\n\ndef convert_object(df):\n obj_df = df.select_dtypes(include=['object']).copy()\n for col_name in obj_df.columns:\n df[col_name] = df[col_name].fillna('NA')\n\ndef convert_int64(df):\n int64_df = df.select_dtypes(include=['int64']).copy()\n for col_name in int64_df.columns:\n df[col_name] = df[col_name].fillna(0)\n\ndef convert_float64(df):\n float64_df = df.select_dtypes(include=['float64']).copy()\n for col_name in float64_df.columns:\n df[col_name] = df[col_name].fillna(0.0)\n\ndef preprocess_Fare(df): # 将0 值换成 median值\n df['Fare'].replace(0.0, np.nan, inplace=True)\n df['Fare'] = df['Fare'].fillna(df['Fare'].median())\n\ndef preprocess_Age(df):\n df['Age'] = df['Age'].fillna(df['Age'].median())\n\n\ndef preprocess_Cabin_1(df): #处理方法一\n df['Cabin'].fillna('', inplace=True)\n cabin_names_set = set()\n for val in df['Cabin']:\n cabin_names_set |= set(val.split())\n for name in cabin_names_set:\n col_name = 'Cabin' + '_' + name\n df[col_name] = df['Cabin'].apply(lambda s: 1 if name in s.split() else 0)\n del df['Cabin']\n return df\n\ndef get_cabin_alphabet_set(s):\n cabin_alphabet_set = set()\n for c in s:\n if c.isalpha():\n cabin_alphabet_set.add(c)\n return cabin_alphabet_set\n\ndef preprocess_Cabin_2(df): #处理方法二\n df['Cabin'].fillna('', inplace=True)\n cabin_names_set = set()\n cabin_alpha_set = set()\n for val in df['Cabin']:\n cabin_names_set |= set(val.split())\n cabin_alpha_set |= get_cabin_alphabet_set(val)\n for name in cabin_names_set:\n col_name = 'Cabin' + '_' + name\n df[col_name] = df['Cabin'].apply(lambda s: 1 if name in s.split() else 0)\n for alpha in cabin_alpha_set:\n col_name = 'Cabin' + '_' + 
alpha\n df[col_name] = df['Cabin'].apply(lambda s: 1 if alpha in s else 0)\n del df['Cabin']\n return df\n\ndef preprocess_Pclass(df):\n df['Pclass'] = df['Pclass'].apply(lambda pn: str(pn))\n\ndef convert_dataframe(df):\n preprocess_Cabin_2(df)\n preprocess_Age(df)\n preprocess_Fare(df)\n preprocess_Pclass(df)\n# convert_object(df)\n convert_int64(df)\n convert_float64(df) \n\ndef adjust_test_dataframe(test_df, train_df):\n for col_name in train_df.columns:\n if col_name not in test_df.columns:\n d_type = train_df[col_name].dtype\n if d_type==np.float64:\n test_df[col_name] = 0.\n elif d_type==np.int64:\n test_df[col_name] = 0\n elif d_type==np.object:\n test_df[col_name] = 'NA'\n \nif __name__=='__main__':\n start_time = time.time()\n\n train_data_frame = pd.read_csv('./train.csv')\n# count_nan = train_data_frame['Ticket'].notnull().sum()\n# print('count_nan is ', count_nan)\n# sys.exit(1)\n test_data_frame = pd.read_csv('./test.csv')\n \n print(train_data_frame[['Age', 'Survived']].corr())\n \n# used_cols = list(set(train_data_frame.columns) -\n# set(['PassengerId', 'Survived', 'Name', 'Ticket']))\n# cols_to_drop = ['PassengerId', 'Survived', 'Name', 'Ticket']\n cols_to_drop = ['PassengerId', 'Survived', 'Name']\n features_data_frame = train_data_frame.drop(cols_to_drop, axis=1, errors='ignore')\n target_data_frame = train_data_frame['Survived']\n test_data_frame = test_data_frame.drop(cols_to_drop, axis=1, errors='ignore')\n \n \n \n \n# concated_dataframe = pd.concat([features_data_frame, test_data_frame])\n concated_dataframe = features_data_frame.append(test_data_frame)\n convert_dataframe(concated_dataframe)\n concated_dataframe = pd.get_dummies(concated_dataframe)\n \n# concated_dataframe = concated_dataframe.select_dtypes(include=['float', 'int']).copy()\n features_data_frame = concated_dataframe[:len(features_data_frame)]\n test_data_frame = concated_dataframe[len(features_data_frame):]\n# print('features_data_frame is ', list(features_data_frame.columns))\n# print('features_data_frame is ', list(features_data_frame['Fare'])[295:307])\n# print('test_dataframe.shape is ', test_data_frame.shape)\n\n\n# features_data_frame = pd.get_dummies(features_data_frame)\n# test_data_frame = pd.get_dummies(test_data_frame)\n# adjust_test_dataframe(features_data_frame, train_data_frame)\n \n# features_cols = list(set(train_data_frame.columns)-set(['Survived']))\n\n# print(data_dummies.dtypes)\n# col_names = list(data_dummies.columns)\n# train_cols = list(set(train_data_frame.columns)-\n# set(['PassengerId', 'Survived', 'Name', 'Ticket']))\n \n# X_train, X_test, y_train, y_test = train_test_split(\n# train_data_frame[train_cols], \n# train_data_frame['SalePrice'], \n# random_state=42)\n\n######################################################################\n \n# lr = LinearRegression().fit(X_train, y_train)\n \n# best_ratio = 0\n# best_score = -1000\n# scores_mean_list = []\n# ratio_list = []\n# for ratio in range(10, 100, 10):\n# print('ratio is ', ratio)\n# kfold = KFold(n_splits=5, shuffle=True, random_state=0)\n# rf = RandomForestRegressor(n_estimators=1000,\n# max_features=int(len(train_cols)*ratio/100),\n# max_depth=4, \n# n_jobs=4)\n# scores = cross_val_score(rf, train_data_frame[train_cols], \n# train_data_frame['SalePrice'], \n# cv=kfold)\n# print('ratio is ', ratio, 'scores mean is ', scores.mean())\n# scores_mean_list.append(scores.mean())\n# ratio_list.append(ratio)\n# if best_score < scores.mean():\n# best_score = scores.mean()\n# best_ratio = ratio\n\n# plt.plot(ratio_list, 
scores_mean_list)\n# plt.show()\n print('training start...')\n# rf = RandomForestRegressor(n_estimators=10000, \n# max_features=int(len(train_cols)*best_ratio/100),\n# max_depth=4,\n# n_jobs=4).fit(\n# train_data_frame[train_cols],\n# train_data_frame['SalePrice'])\n\n###########################################################################\n\n#svr grid_search.best_params_ is {'n_estimators': 5000, 'learning_rate': 0.1, 'max_depth': 6}\n#svr grid_search.best_score_ is 0.846240179574\n#best score is 0.997755331089\n#time cost is 7278.848999977112\n\n param_grid = {'n_estimators': [5000],\n 'learning_rate': [0.001, 0.001, 0.1],\n 'max_depth': [2, 4, 6, 8, 10, None]}\n \n grid_search = GridSearchCV(GradientBoostingClassifier(random_state=42), \n param_grid, cv=5)\n grid_search.fit(features_data_frame, target_data_frame)\n test_score = grid_search.score(features_data_frame, target_data_frame)\n outcome = list(grid_search.predict(test_data_frame))\n \n print('svr grid_search.best_params_ is ', grid_search.best_params_)\n print('svr grid_search.best_score_ is ', grid_search.best_score_)\n print('best score is ', test_score)\n \n\n# gbr = GradientBoostingClassifier(n_estimators=1000, max_depth=4, \n# learning_rate=0.07,\n# random_state=0)\n# scores = cross_val_score(gbr, features_data_frame, target_data_frame, cv=5)\n# print('scores mean is ', scores.mean())\n# \n## print('features_data_frame is ', features_data_frame['Sex'])\n# gbr.fit(features_data_frame, target_data_frame)\n# print(\"accuracy on training set:\", gbr.score(features_data_frame, \n# target_data_frame))\n#\n# print('length of test_data_frame ', len(test_data_frame))\n# outcome = list(gbr.predict(test_data_frame))\n# print('length of outcome is ', len(outcome))\n\n######################################################################\n\n# param_grid = {'n_estimators': [5000, 8000],\n# 'max_depth': [8, 15, 20, None]}\n# \n# grid_search = GridSearchCV(RandomForestClassifier(random_state=5), \n# param_grid, cv=5)\n# grid_search.fit(features_data_frame, target_data_frame)\n# test_score = grid_search.score(features_data_frame, target_data_frame)\n# outcome = list(grid_search.predict(test_data_frame))\n# \n# print('svr grid_search.best_params_ is ', grid_search.best_params_)\n# print('svr grid_search.best_score_ is ', grid_search.best_score_)\n# print('best score is ', test_score)\n \n#######################################################################\n \n ### Logistic Regression 需要feature rescaling\n \n# df_concated = pd.concat([features_data_frame, test_data_frame])\n# scaler = StandardScaler().fit(df_concated)\n# features_data_frame = scaler.transform(features_data_frame)\n# test_data_frame = scaler.transform(test_data_frame)\n# \n# param_grid = {'penalty': ['l1', 'l2'],\n# 'C': [0.01, 0.1, 1, 10, 100],\n# 'solver': ['liblinear']}\n# \n# grid_search = GridSearchCV(LogisticRegression(random_state=5), \n# param_grid, cv=5)\n# grid_search.fit(features_data_frame, target_data_frame)\n# test_score = grid_search.score(features_data_frame, target_data_frame)\n# outcome = list(grid_search.predict(test_data_frame))\n# \n# print('svr grid_search.best_params_ is ', grid_search.best_params_)\n# print('svr grid_search.best_score_ is ', grid_search.best_score_)\n# print('best score is ', test_score)\n \n \n# lr = LogisticRegression()\n# lr.fit(features_data_frame, target_data_frame)\n# print(\"accuracy on training set:\", lr.score(features_data_frame, \n# target_data_frame))\n#\n# print('length of test_data_frame ', len(test_data_frame))\n# 
outcome = list(lr.predict(test_data_frame))\n# print('length of outcome is ', len(outcome))\n \n#######################################################################\n \n# param_grid = {'kernel': [\"rbf\"],\n# 'C' : np.logspace(-5, 5, num=11, base=10.0),\n# 'gamma' : np.logspace(-5, 5, num=11, base=10.0)}\n# df_concated = pd.concat([features_data_frame, test_data_frame])\n# scaler = StandardScaler().fit(df_concated)\n# x_train_scaled = scaler.transform(features_data_frame)\n# x_test_scaled = scaler.transform(test_data_frame)\n# \n# grid_search = GridSearchCV(SVC(), param_grid, cv=5)\n# grid_search.fit(x_train_scaled, target_data_frame)\n# test_score = grid_search.score(x_train_scaled, target_data_frame)\n# outcome = list(grid_search.predict(x_test_scaled))\n# \n# print('svr grid_search.best_params_ is ', grid_search.best_params_)\n# print('svr grid_search.best_score_ is ', grid_search.best_score_)\n# print('best score is ', test_score)\n\n\n#######################################################################\n\n outcome_data_frame = pd.DataFrame(\n {'PassengerId': list(range(892, 1310)), \n 'Survived': outcome\n }, index=None)\n \n# outcome_data_frame = pd.DataFrame(\n# {'PassengerId': list(range(892, 1310)), \n# 'Survived': list(np.random.randint(0, 2, size=418))\n# }, index=None)\n \n outcome_data_frame = outcome_data_frame.set_index('PassengerId')\n outcome_data_frame.to_csv('./tsg_outcome.csv')\n\n end_time = time.time() \n print('time cost is ', end_time - start_time)\n \n \n#######################################################################\n","sub_path":"titanic/history/0.80383/titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":13501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"228982819","text":"# Tic Tac Toe project, showcasing function programming and logic to play the game\r\n\r\n# The Board, a print function that clears previous board by using a bunch of new lines\r\n\r\nimport random\r\n\r\ndef display_board(board):\r\n print('\\n' * 100)\r\n print(board[7] + '|' + board[8] + '|' + board[9])\r\n print('- - -')\r\n print(board[4] + '|' + board[5] + '|' + board[6])\r\n print('- - -')\r\n print(board[1] + '|' + board[2] + '|' + board[3])\r\n\r\n# Player Marker, uses a while loop to make sure I get the correct responses\r\n\r\ndef player_input():\r\n\r\n player1 = 'nah'\r\n player2 = ''\r\n symbol = ['X', 'O']\r\n\r\n while player1 not in symbol:\r\n player1 = input('Player 1, wanna be \"X\" or \"O\"? 
')\r\n\r\n if player1 not in symbol:\r\n print('X or O not chosen, case sensitive and no numbers!')\r\n\r\n # check for what index player1 is at, if == [0] or [1], assign opposite index to player 2\r\n\r\n if player1 == symbol[0]:\r\n player2 = symbol[1]\r\n else:\r\n player2 = symbol[0]\r\n\r\n return (player1, player2)\r\n\r\n# Marker Placement\r\n\r\ndef place_marker(board, marker, position):\r\n board[position] = marker\r\n\r\n# Win Check, takes in board and a player marker to see if that player has won\r\n\r\ndef win_check(board, mark):\r\n\r\n return ((board[1] == board[2] == board[3] == mark) or\r\n (board[4] == board[5] == board[6] == mark) or\r\n (board[7] == board[8] == board[9] == mark) or\r\n (board[1] == board[4] == board[7] == mark) or\r\n (board[2] == board[5] == board[8] == mark) or\r\n (board[3] == board[6] == board[9] == mark) or\r\n (board[1] == board[5] == board[9] == mark) or\r\n (board[3] == board[5] == board[7] == mark))\r\n\r\n# First Move, randomly chooses\r\n\r\ndef choose_first():\r\n decided = random.randint(1, 2)\r\n return str(decided)\r\n\r\n# Space Check, checks if a space a player chooses is actually available to place their marker\r\n\r\ndef space_check(board, position):\r\n return board[position] == ' '\r\n\r\n# Full Board, used in case of a tie\r\n\r\ndef full_board_check(board):\r\n\r\n for x in range(1,10):\r\n\r\n if space_check(board,x):\r\n return False\r\n\r\n return True\r\n\r\n# Player Choice, used to ask player where they'd like to place the marker.\r\n# uses space_check function to see if move possible\r\n\r\ndef player_choice(board):\r\n choice = 0\r\n\r\n while choice not in [1, 2, 3, 4, 5, 6, 7, 8, 9] or not space_check(board, choice):\r\n choice = int(input('Where do you want to place your marker? Pick 1-9: '))\r\n\r\n return choice\r\n\r\n# Replay, play again?\r\n\r\ndef replay():\r\n again = ' '\r\n check = ['YES', 'NO']\r\n\r\n while again not in check:\r\n again = input('Wanna play again? Case sensitively, type YES or NO: ')\r\n\r\n if again == check[0]:\r\n return True\r\n\r\n# GAME LOGIC #\r\n\r\nprint('Welcome to Tic Tac Toe!')\r\n\r\nwhile True:\r\n\r\n game_board = ['#', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\r\n\r\n player1, player2 = player_input()\r\n turn = choose_first()\r\n print(f'Player {turn} will go first!')\r\n\r\n play_game = input('Ready? 
Y or N: ').upper()\r\n if play_game == 'Y':\r\n game_on = True\r\n else:\r\n game_on = False\r\n\r\n while game_on:\r\n\r\n if turn == '1':\r\n\r\n # Player 1 Turn\r\n display_board(game_board)\r\n\r\n choice = player_choice(game_board)\r\n place_marker(game_board, player1, choice)\r\n\r\n if win_check(game_board, player1):\r\n display_board(game_board)\r\n print('Player 1 wins!')\r\n game_on = False\r\n else:\r\n if full_board_check(game_board):\r\n display_board(game_board)\r\n print('Tie Game!')\r\n game_on = False\r\n else:\r\n turn = '2'\r\n\r\n # Player2's turn.\r\n else:\r\n\r\n display_board(game_board)\r\n\r\n choice = player_choice(game_board)\r\n place_marker(game_board, player2, choice)\r\n\r\n if win_check(game_board, player2):\r\n display_board(game_board)\r\n print('Player 2 wins!')\r\n game_on = False\r\n else:\r\n if full_board_check(game_board):\r\n display_board(game_board)\r\n print('Tie Game!')\r\n game_on = False\r\n else:\r\n turn = '1'\r\n\r\n if not replay():\r\n game_on = False\r\n break\r\n","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"410558269","text":"import pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom sklearn.exceptions import NotFittedError\n\nfrom proglearn.deciders import KNNRegressionDecider, LinearRegressionDecider\nfrom proglearn.base import BaseTransformer, BaseVoter\n\n\ndef test_predict_without_fit():\n # Generate random data\n X = np.random.normal(0, 1, size=(100, 3))\n\n with pytest.raises(NotFittedError):\n krd = KNNRegressionDecider()\n krd.predict(X)\n\n with pytest.raises(NotFittedError):\n lrd = LinearRegressionDecider()\n lrd.predict(X)\n\n\nclass IdentityTransformer(BaseTransformer):\n def __init__(self):\n self._is_fitted = False\n\n def fit(self):\n self._is_fitted = True\n\n def transform(self, X):\n return X\n\n def is_fitted(self):\n return self._is_fitted\n\n\nclass IdentityVoter(BaseVoter):\n def __init__(self, index):\n self._is_fitted = False\n self.index = index\n\n def fit(self):\n self._is_fitted = True\n\n def vote(self, X):\n n = len(X)\n return X[:, self.index].reshape(n)\n\n def is_fitted(self):\n return self._is_fitted\n\n\ndef test_correct_decision():\n np.random.seed(3)\n\n X = 0.1 * np.random.randn(2000, 3) + 1\n Y = np.sum(X, axis=1).reshape((2000, 1))\n\n lrd = LinearRegressionDecider()\n krd = KNNRegressionDecider()\n\n transformer_id_to_transformers = {\n \"0\": [IdentityTransformer()],\n \"1\": [IdentityTransformer()],\n \"2\": [IdentityTransformer()],\n }\n transformer_id_to_voters = {\n \"0\": [IdentityVoter(0)],\n \"1\": [IdentityVoter(1)],\n \"2\": [IdentityVoter(2)],\n }\n\n lrd.fit(X, Y, transformer_id_to_transformers, transformer_id_to_voters)\n krd.fit(X, Y, transformer_id_to_transformers, transformer_id_to_voters)\n\n X_test = np.ones((5, 3))\n Y_test = 3 * np.ones(5)\n\n assert_allclose(Y_test, lrd.predict(X_test), atol=1e-4)\n assert_allclose(Y_test, krd.predict(X_test), atol=1e-2)\n","sub_path":"tests/test_deciders.py","file_name":"test_deciders.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"199966616","text":"import os\nimport sys\nimport json\nimport boto3\nimport zlib\nimport base64\nimport datetime\nfrom datetime import datetime as dt\nimport urllib\nimport requests\n\nprint('loading function')\n \n## Create Logs\ndef 
logging(logLv, logMsg):\n\n logTimeStump = dt.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n\n print(str(logLv) + \" \" + str(logTimeStump) + \" \" + str(logMsg))\n return\n\n## S3 get list\ndef getList(fileName, s3keyPrefix, s3Bucket, filePath):\n\n s3 = boto3.resource('s3')\n \n newFileName = s3keyPrefix + fileName\n\n try:\n s3.Bucket(s3Bucket).download_file(newFileName, filePath)\n with open(filePath) as f:\n lines = json.load(f)\n return lines\n except Exception:\n logging(\"ERROR\", \"Fail to get files from S3-backet\")\n sys.exit()\n\n## post slack\ndef post_slack(log_data, log_url, contact, description):\n\n SLACK_POST_URL = contact['SLACK']\n channnel = '#connectcommon'\n\n message = str(description) + \\\n \"\\n\" + str(log_data) + \\\n \"\\n\" + log_url\n\n params = {\n 'channel':channnel,\n 'text': message\n }\n try:\n r = requests.post(SLACK_POST_URL, data=json.dumps(params))\n except Exception:\n logging(\"ERROR\", \"Fail to post slack\")\n sys.exit()\n\n## post mail\ndef post_sns(log_data, log_url, contact, description, title):\n\n topic_arn = contact['Mail']\n sns = boto3.client('sns')\n\n sns_message = description + \\\n \"\\n\" + log_data+ \\\n \"\\n\" + log_url\n\n try:\n responses = sns.publish(\n TopicArn = topic_arn,\n Message = sns_message,\n Subject = title\n )\n except Exception:\n logging(\"ERROR\", \"Fail to post Mail\")\n sys.exit()\n\n# judg_status\ndef judge_status(log_data, log_url, x, title):\n\n errorcode = x['errorcode']\n contact = x['contact']\n description = x['description']\n\n if errorcode in log_data:\n print(\"true\")\n if \"Mail\" not in contact:\n post_slack(log_data, log_url, contact, description)\n logging(\"INFO\", \"Post Slack\")\n elif \"SLACK\" not in contact:\n post_sns(log_data, log_url, contact, description, title)\n logging(\"INFO\", \"Post Mail\")\n else:\n post_slack(log_data, log_url, contact, description)\n logging(\"INFO\", \"Post Slack\")\n post_sns(log_data, log_url, contact, description, title)\n logging(\"INFO\", \"Post Mail\")\n else:\n print(\"false\")\n\n## main\ndef lambda_handler(event, context):\n # global val\n ENV = os.environ['ENV']\n STG = os.environ['STG']\n # s3 param\n fileName = os.environ['fileName']\n s3Bucket = os.environ['s3Bucket']\n s3keyPrefix = os.environ['s3keyPrefix']\n filePath = '/tmp/' + fileName\n region = context.invoked_function_arn.split(\":\")[3]\n\n # get logs\n if 'awslogs' not in event:\n if \"aws:sns\" in event['Records'][0]['EventSource']:\n message_unicode = (event['Records'][0]['Sns']['Message'])\n message_dist = json.loads(message_unicode)\n url_quote = urllib.parse.quote(message_dist['AlarmName'], safe='')\n msg = json.dumps(message_dist, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))\n log_url = \"https://\"+str(region)+\".console.aws.amazon.com/cloudwatch/home?region=\"+str(region)+\"#alarmsV2:alarm/\"+str(url_quote)\n # read list and push notification\n listJson = getList(fileName, s3keyPrefix, s3Bucket, filePath)\n for k, v in listJson.items():\n for x in v:\n if \"Metrics\" in k:\n description = \"\"\n contact = x['contact']\n title = \"【inv-\" + str(STG) + \"-\" + str(ENV) + \"】-\" + \"メトリクスアラーム通知\"\n post_slack(msg, log_url, contact, description)\n post_sns(msg, log_url, contact, description, title)\n else:\n data_json = json.loads(zlib.decompress(base64.b64decode(event['awslogs']['data']), 16+zlib.MAX_WBITS))\n log_json = json.loads(json.dumps(data_json, ensure_ascii=False))\n log_grpname = log_json[\"logGroup\"]\n log_stream = log_json[\"logStream\"]\n\n # log 
stream url\n log_url = \"https://\"+str(region)+\".console.aws.amazon.com/cloudwatch/home?region=\"+str(region)+\"#logEventViewer:group=\"+str(log_grpname)+\";stream=\"+str(log_stream)\n\n # read list and push notification\n listJson = getList(fileName, s3keyPrefix, s3Bucket, filePath)\n\n for mess in log_json['logEvents']:\n log_data = mess['message']\n spl_logdata = log_data.split()\n spl_result = spl_logdata[0]\n result = spl_result[6:]\n for sepmes in spl_logdata:\n if 'X-ErrorId' in sepmes:\n spl_sm = sepmes.split(':')\n ssm = spl_sm[1]\n for k, v in listJson.items():\n for x in v:\n if \"Application\" in k:\n if 'ssm' in locals():\n if ssm == x.get('errorcode'):\n title = \"【inv-\" + str(ENV) + \"-\" + str(STG) + \"】-\" + \"プログラムエラー通知-\" + str(ssm)\n judge_status(log_data, log_url, x, title)\n elif \"errorcode_bash\" in k:\n if x['errorcode'] in log_data:\n errorcode = x['errorcode']\n title = \"bash内部エラー通知-\" + str(errorcode)\n judge_status(log_data, log_url, x, title)\n else :\n if x['errorcode'] in log_data:\n errorcode = x['errorcode']\n title = str(result) + \"-\" + \"シナリオ検知エラー通知-\" + str(errorcode)\n judge_status(log_data, log_url, x, title)\n\n\n","sub_path":"car_connected/cicd_cn/monitor-config/INV-ver-sub01/src/handlers/my_lambda_function/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"148624550","text":"\n### Electric Potential and Electric Field ###\n### I assume that two particles are at: positive charge at (x0p , -1) and negative charge at (x0n , -1) ###\n### 1m*1m was so big and I couldn't see and noticeable effect. So I considered a 1cm*1cm plate. ###\n\nimport numpy as np\nimport math \nfrom pylab import imshow , show, quiver\n\nx0n = 10\nx0p= 0\n\ne = 8.854187817\n\nphi = np.zeros(shape=( 101 , 101))\nx = np.zeros(101)\ny = np.zeros(101)\n\ndx = 10**-4 # in meter unit\n \nfor i in range ( 101):\n for j in range ( 101):\n x[j] = j * dx\n y[i] = i * dx\n phi[i , j] = 100/(4 * e * math.pi) * ( ( (x[j]-x0p*dx)**2 + ( y[i]+ 10**-2 )**2 )**-0.5 - ( (x[j] - x0n*dx )**2 + (y[i] + 10**-2 )**2 )**-0.5 )\n \nimshow(phi , origin = \"lower\")\nshow()\n\nE_x = np.zeros(shape=(101, 101))\nE_y = np.zeros(shape=(101, 101))\nE = np.zeros(shape=(101, 101))\n### electric field ###\nfor i in range (100):\n for j in range (100):\n E_x[i , j] = (phi[i+1,j]-phi[i,j])/dx\n E_y[i ,j] = ( phi[i , j+1]- phi[i , j])/dx\n \n\nquiver(x,y,E_x,E_y)\nshow()\n \n","sub_path":"week03/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"315980516","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.random import *\nfrom modules.distributed_regression import update_functions\n\n\nnp.random.seed(0)\nclass distributed_updates(update_functions):\n\n\n def __init__(self):\n self.N = 100\n self.m = 1000\n self.r_i = 80\n self.iteration =20000\n self.sparsity_percentage = 0.1\n self.lamb = 1.69\n self.eta = 0.00002849\n self.B = 0.001\n self.rho = self.lamb*((self.B)**2)\n self.how_weakly_sparse = 0.0\n self.w_noise = 30\n\n def run(self):\n w,w_star,w_all,U_all,d_all,L2,graph = self.make_variables_noise_after(self.N,self.m,self.r_i,self.sparsity_percentage,self.how_weakly_sparse,self.w_noise)\n self.params_checker(self.rho,self.lamb,self.eta,U_all,self.B,self.m,self.N,graph)\n 
self.centralized_convexity_checker(self.B,self.lamb,U_all,self.N)\n extra_mc = self.pg_extra_mc_soft(U_all,d_all,w_star,L2,self.N,self.m,self.r_i,1.8/self.m,0.00092,6/self.m,self.iteration,graph,w_all)\n extra_l1 = self.pg_extra_l1(U_all,d_all,w_star,L2,self.N,self.m,self.r_i,1.05/self.m,0.00092,self.rho,self.iteration,graph,w_all)\n extra = self.extra(U_all,d_all,w_star,L2,self.N,self.m,self.r_i,self.lamb,0.00092,self.rho,self.iteration,graph,w_all)\n\n\n plt.legend()\n plt.xlabel(\"iterations\")\n plt.ylabel(\"Mean Square Error (dB)\")\n plt.show()\n x = range(len(extra_l1))\n plt.plot(x,extra,label = \"EXTRA\")\n plt.plot(x,extra_l1,label = \"PG-EXTRA L1\")\n plt.plot(x,extra_mc,label = \"PG-EXTRA MC\")\n # plt.plot(x,wdmc1,label = \"distributed mc\")\n plt.plot(x,w_star,color = \"black\")\n plt.legend()\n plt.show()\n # print(extra_mc)\n\nif __name__ == \"__main__\":\n simulation = distributed_updates()\n simulation.run()\n\n #l1 -44 extra -39 mc -44.7弱","sub_path":"main_pgextra_different_m.py","file_name":"main_pgextra_different_m.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"126667962","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 23 18:29:21 2019\n\n@author: hassan\n\"\"\"\n\nfrom socket import *\nfrom threading import Thread\n\n\n\nclients_name={}\nclients = []\naddresses = {}\n\ndef accept_incoming_connections():\n while True:\n \n client,client_address=s.accept()\n print(\"%s:%s has connected ... \" %client_address)\n client.send(bytes(\"Greating From chatRoom...\" + \"Now Type Your Name and press Enter..\" , \"utf-8\"))\n addresses[client]=client_address\n clients.append(client)\n Thread(target=handle_client, args=(client,)).start()\n\ndef handle_client(client):\n name = client.recv(1024).decode('utf-8')\n welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' 
% name\n    client.send(bytes(welcome, \"utf8\"))\n    msg = \"%s has joined the chat!\" % name\n    sendToAll(msg,client)\n    clients_name[client] = name\n    \n    while True:\n        msg = client.recv(1024)\n        if msg != bytes(\"{quit}\", \"utf8\"):\n            broadcast(msg, name+\": \")\n        else:\n            client.send(bytes(\"{quit}\", \"utf8\"))\n            client.close()\n            # clients is a list, so use remove(); del clients[client] would raise TypeError\n            clients.remove(client)\n            broadcast(bytes(\"%s has left the chat.\" % name, \"utf8\"))\n            break\n\ndef sendToAll(msg,con):\n    for client in clients:\n        if (client != con):\n            client.send(msg.encode('utf-8'))\n\ndef broadcast(msg, prefix=\"\"): # prefix is for name identification.\n    \"\"\"Broadcasts a message to all the clients.\"\"\"\n\n    for client in clients:\n        client.send(bytes(prefix, \"utf8\")+msg) \n\n\nhost = \"\"\nport = 7000\nadd = (host, port)\n\ns = socket(AF_INET, SOCK_STREAM)\ns.bind(add)\n\ns.listen(5)\nprint(\"Waiting for connection...\")\nACCEPT_THREAD = Thread(target=accept_incoming_connections)\nACCEPT_THREAD.start()\n\n\n \n","sub_path":"Section 2019/sec8 chatRoom/server2.py","file_name":"server2.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"367766387","text":"\"\"\"\nCode for Gaussian processes.\n\"\"\"\n\nfrom argparse import Namespace\nimport copy\nimport numpy as np\n\nfrom .gp_util import kern_exp_quad, sample_mvn, gp_post\n\n\nclass SimpleGp:\n    \"\"\"\n    Simple GP model without external backend.\n    \"\"\"\n\n    def __init__(self, params=None, verbose=True):\n        \"\"\"\n        Parameters\n        ----------\n        params : Namespace_or_dict\n            Namespace or dict of parameters for this model.\n        verbose : bool\n            If True, print description string.\n        \"\"\"\n        self.set_params(params)\n        if verbose:\n            self.print_str()\n\n    def set_params(self, params):\n        \"\"\"Set self.params, the parameters for this model.\"\"\"\n        params = dict_to_namespace(params)\n\n        # Set self.params\n        self.params = Namespace()\n        self.params.ls = getattr(params, 'ls', 3.7)\n        self.params.alpha = getattr(params, 'alpha', 1.85)\n        self.params.sigma = getattr(params, 'sigma', 1e-5)\n        self.params.kernel = getattr(params, 'kernel', kern_exp_quad)\n\n        # Initialize self.data to be empty\n        self.data = Namespace()\n        self.data.X = []\n        self.data.y = []\n\n    def set_data(self, data):\n        \"\"\"Set self.data.\"\"\"\n        data = dict_to_namespace(data)\n        self.data = copy.deepcopy(data)\n\n    def get_gp_prior_mu_cov(self, x_list, full_cov=True):\n        \"\"\"\n        Return GP prior parameters: mean (mu) and covariance (cov).\n\n        Parameters\n        ----------\n        x_list : list\n            List of numpy ndarrays, each representing a domain point.\n        full_cov : bool\n            If True, return covariance matrix. If False, return list of standard\n            deviations.\n\n        Returns\n        -------\n        mu : ndarray\n            A numpy 1d ndarray with len=len(x_list) of floats, corresponding to\n            posterior mean for each x in x_list.\n        cov : ndarray\n            If full_cov is False, return a numpy 1d ndarray with len=len(x_list) of\n            floats, corresponding to posterior standard deviations for each x in x_list.\n            If full_cov is True, return the covariance matrix as a numpy ndarray\n            (len(x_list) x len(x_list)).\n        \"\"\"\n        # NOTE: currently assumes zero-mean prior.\n        # TODO: generalize beyond zero-mean prior.\n        mu = np.zeros(len(x_list))\n        cov = self.params.kernel(x_list, x_list, self.params.ls, self.params.alpha)\n\n        if full_cov is False:\n            cov = np.sqrt(np.diag(cov))\n\n        return mu, cov\n\n    def get_gp_post_mu_cov(self, x_list, full_cov=True):\n        \"\"\"\n        Return GP posterior parameters: mean (mu) and covariance (cov). 
If there is no\n data, return the GP prior parameters.\n\n Parameters\n ----------\n x_list : list\n List of numpy ndarrays, each representing a domain point.\n full_cov : bool\n If True, return covariance matrix. If False, return list of standard\n deviations.\n\n Returns\n -------\n mu : ndarray\n A numpy 1d ndarray with len=len(x_list) of floats, corresponding to\n posterior mean for each x in x_list.\n cov : ndarray\n If full_cov is False, return a numpy 1d ndarray with len=len(x_list) of\n floats, corresponding to posterior standard deviations for each x in x_list.\n If full_cov is True, return the covariance matrix as a numpy ndarray\n (len(x_list) x len(x_list)).\n \"\"\"\n if len(self.data.X) == 0:\n return self.get_gp_prior_mu_cov(x_list, full_cov)\n\n # If data is not empty:\n\n mu, cov = gp_post(\n self.data.X,\n self.data.y,\n x_list,\n self.params.ls,\n self.params.alpha,\n self.params.sigma,\n self.params.kernel,\n full_cov=full_cov,\n )\n\n return mu, cov\n\n def get_gp_post_mu_cov_single(self, x):\n \"\"\"Get GP posterior for an input x. Return posterior mean and std for x.\"\"\"\n mu_arr, std_arr = self.get_gp_post_mu_cov([x], full_cov=False)\n return mu_arr[0], std_arr[0]\n\n def sample_gp_prior(self, x_list, n_samp, full_cov=True):\n \"\"\"Get samples from gp prior for each input in x_list.\"\"\"\n mu, cov = self.get_gp_prior_mu_cov(x_list, full_cov)\n return self.get_normal_samples(mu, cov, n_samp, full_cov)\n\n def sample_gp_post(self, x_list, n_samp, full_cov=True):\n \"\"\"Get samples from gp prior for each input in x_list.\"\"\"\n if len(self.data.X) == 0:\n return self.sample_gp_prior(x_list, n_samp, full_cov)\n\n # If data is not empty:\n mu, cov = self.get_gp_post_mu_cov(x_list, full_cov)\n return self.get_normal_samples(mu, cov, n_samp, full_cov)\n\n def get_normal_samples(self, mu, cov, n_samp, full_cov):\n \"\"\"Return normal samples.\"\"\"\n if full_cov:\n sample_list = list(sample_mvn(mu, cov, n_samp))\n else:\n sample_list = list(\n np.random.normal(\n mu.reshape(-1,), cov.reshape(-1,), size=(n_samp, len(mu))\n )\n )\n x_list_sample_list = list(np.stack(sample_list).T)\n return x_list_sample_list\n\n def print_str(self):\n \"\"\"Print a description string\"\"\"\n print('*SimpleGp with params={}'.format(self.params))\n\n\ndef dict_to_namespace(params):\n \"\"\"\n If params is a dict, convert it to a Namespace, and return it.\n\n Parameters\n ----------\n params : Namespace_or_dict\n Namespace or dict.\n\n Returns\n -------\n params : Namespace\n Namespace of params\n \"\"\"\n # If params is a dict, convert to Namespace\n if isinstance(params, dict):\n params = Namespace(**params)\n\n return params\n","sub_path":"src/simple_gp.py","file_name":"simple_gp.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"572185634","text":"\r\nclass DogrulamaTool():\r\n def __init__(self,metin):\r\n self.metin = metin\r\n\r\n def ibanDogrulama(self):\r\n metin = self.metin.replace(\" \",\"\") \r\n if metin.isalnum():\r\n metin = metin[4:] + metin[:4]\r\n iban2 = \"\"\r\n for kar in metin:\r\n if not kar.isdigit():\r\n iban2 += str(ord(kar)-55)\r\n else:\r\n iban2 += kar\r\n a = iban2[:9]\r\n b = str(int(a)%97) + iban2[9:18]\r\n c = str(int(b)%97) + iban2[18:]\r\n sonuc = str(int(c)%97)\r\n if sonuc == \"1\":\r\n return True\r\n else:\r\n return 
False","sub_path":"tools/iban.py","file_name":"iban.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"189212797","text":"# Copyright (c) 2014 The Hackerati, Inc.\n# This project is distributed under the terms of the MIT license.\n# See the file LICENSE or http://opensource.org/licenses/MIT.\n\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nimport vcr\nfrom unittest import skip\n\n# for session workaround\nfrom django.conf import settings\nfrom importlib import import_module\n\nclass OAuth2InitialLoginViewTests(APITestCase):\n def setUp(self):\n self.request_url = '/api/v1/oauth2/login/'\n\n def test_get_oauth2_login(self):\n query_params = {'provider': 'accounts.google.com'}\n with vcr.use_cassette('oidc/fixtures/vcr/generic_discovery.yaml'):\n response = self.client.get(self.request_url, data=query_params)\n self.assertEqual(response.status_code, status.HTTP_302_FOUND)\n #TODO: more thorough tests might be needed here\n\n def test_get_oauth2_login_missing_provider(self):\n response = self.client.get(self.request_url)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_get_oauth2_login_bad_provider(self):\n query_params = {'provider': 'bad.website.net'}\n response = self.client.get(self.request_url, data=query_params)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\nclass OAuth2LinkProviderViewTests(APITestCase):\n fixtures = ['users.json']\n\n def setUp(self):\n self.request_url = '/api/v1/oauth2/link/'\n self.client.force_authenticate(user=User.objects.get(username='user2'))\n\n def test_get_oauth2_link(self):\n query_params = {'provider': 'accounts.google.com'}\n with vcr.use_cassette('oidc/fixtures/vcr/generic_discovery.yaml'):\n response = self.client.get(self.request_url, data=query_params)\n self.assertEqual(response.status_code, status.HTTP_302_FOUND)\n\n def test_get_oauth2_link_missing_provider(self):\n response = self.client.get(self.request_url)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_get_oauth2_link_bad_provider(self):\n query_params = {'provider': 'bad.website.net'}\n response = self.client.get(self.request_url, data=query_params)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\nclass OAuth2ReturnViewTests(APITestCase):\n def setUp(self):\n self.request_url = '/api/v1/oauth2/code/'\n self.state = 'G6UTVEHWEOHL7847LA3KPNLEVC96ZO3O'\n self.code = '4/P7q7W91a-oMsCeLvIaQm6bTrgtp7'\n\n #TODO: how to stub a Flow object?\n\n #TODO: this is an ugly kludge to set up a session since django\n # apparently takes more than 5 years to fix the bugs in its\n # unit testing framework:\n # https://code.djangoproject.com/ticket/10899\n # https://code.djangoproject.com/ticket/11475\n engine = import_module(settings.SESSION_ENGINE)\n store = engine.SessionStore()\n store.save() # we need to make load() work, or the cookie is worthless\n self.session = store\n self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key\n\n # add parameters to session\n session = self.session\n session['oauth2_flow'] = 'flow' #TODO\n session['oauth2_csrf_token'] = self.state\n session['oauth2_provider'] = 'accounts.google.com'\n session['oauth2_is_link_mode'] = False\n session.save()\n\n @skip(\"can't test this\")\n def test_get_oauth2_return(self):\n query_params = {\n 'state': self.state,\n 'code': self.code\n 
}\n response = self.client.get(self.request_url, data=query_params)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_get_oauth2_return_missing_csrf(self):\n query_params = {\n 'code': self.code\n }\n response = self.client.get(self.request_url, data=query_params)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n def test_get_oauth2_return_invalid_csrf(self):\n query_params = {\n 'state': 'L2Q1J9MHV3RRCEHDH88DSZD43C79GE65',\n 'code': self.code\n }\n response = self.client.get(self.request_url, data=query_params)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n def test_get_oauth2_return_missing_code(self):\n query_params = {\n 'state': self.state\n }\n response = self.client.get(self.request_url, data=query_params)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\nclass OAuth2ReturnViewNoSessionTests(APITestCase):\n def setUp(self):\n self.request_url = '/api/v1/oauth2/code/'\n\n def test_get_oauth2_return_no_session(self):\n response = self.client.get(self.request_url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n","sub_path":"oidc/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"108824598","text":"\"\"\"Project Settings.\"\"\"\nimport os\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '&1pqowa-#idv%)+&&s!yqnd8qf%sm(c1vlabzy-97qftl(kzyv'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = []\n\nADMINS = [\n ('Ankush Chadda', 'contact@aoswebsolutions.com'),\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'whitenoise.runserver_nostatic',\n 'django.contrib.staticfiles',\n\n 'projects',\n 'ideas',\n 'website',\n 'events',\n 'blog',\n 'users',\n 'social.apps.django_app.default',\n 'storages'\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'ctsc.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.media',\n 'website.context_processors.facebook',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'ctsc.wsgi.application'\n\n\n# 
Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, \"static\"),\n]\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = BASE_DIR\n\n# Update database configuration with $DATABASE_URL.\ndb_from_env = dj_database_url.config(conn_max_age=500)\nDATABASES['default'].update(db_from_env)\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Allow all host headers\nALLOWED_HOSTS = ['*']\n\n# Simplified static file serving. Static files served from app itself\nSTATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'\n\n# Social Auth\nAUTHENTICATION_BACKENDS = (\n 'social.backends.facebook.FacebookOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\n# FB App\nSOCIAL_AUTH_FACEBOOK_KEY = '1604541223178135'\nSOCIAL_AUTH_FACEBOOK_SCOPE = ['email']\nSOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {\n 'fields': 'id, name, email'\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': ('%(asctime)s [%(process)d] [%(levelname)s] ' +\n 'pathname=%(pathname)s lineno=%(lineno)s ' +\n 'funcname=%(funcName)s %(message)s'),\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n }\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['console','mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n }\n}\n\n# Emails\nEMAIL_HOST = 'smtp.mailgun.org'\nEMAIL_HOST_USER = 'team@ctsc-india.org'\nEMAIL_HOST_PASSWORD = 'ctsc4eva'\nEMAIL_PORT = 587\nAWS_S3_SECURE_URLS = False\nif DEBUG:\n from local_settings import *\nelse:\n # Heroku Settings\n\n SOCIAL_AUTH_FACEBOOK_SECRET = os.environ['FB_APP_SECRET']\n # Storage of User uploaded media\n DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\n # S3 Access for user media only\n AWS_ACCESS_KEY_ID = os.environ['AWS_KEY']\n AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET']\n AWS_STORAGE_BUCKET_NAME = 
os.environ['AWS_BUCKET']\n","sub_path":"ctsc/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"319874647","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May  5 14:24:22 2015\r\n\r\n@author: A30294\r\n\"\"\"\r\nclass bc():\r\n    b1=15\r\n    def b2():\r\n        return 20\r\n\r\nimport numpy as np\r\nimport time,sys,os\r\n\r\nt=time.time()\r\na=np.percentile([1,3,5,7,9],75);\r\nb=bc()\r\n\r\n\r\nc=bc\\\r\n.b2()\r\ntime.sleep(2)\r\nt2=time.time()\r\nprint(t2)\r\ntdiff=t2-t","sub_path":"numpy/nptest.py","file_name":"nptest.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"350425915","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Liwink'\n\nimport sys\nfrom redis import StrictRedis as Redis\n\n\ndef _publish(channel, msg):\n    subscribers = r.smembers(channel) or []\n    for subscriber in subscribers:\n        r.rpush(\"{channel}:{subscriber}\".format(channel=channel, subscriber=subscriber), msg)\n\n\nif __name__ == \"__main__\":\n    r = Redis(host=\"localhost\", port=6379, db=0)\n    _publish(sys.argv[1], sys.argv[2])\n","sub_path":"redis_pubsub/1/publish_sample.py","file_name":"publish_sample.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"5875889","text":"import scrapy\nfrom lxml import etree\n# import lxml.etree\nfrom spiders.items import SpidersItem\n\n'''\nUse the Scrapy framework and XPath to scrape the names, genres and release dates of the first 10 movies on Maoyan Movies, and save them to a CSV file encoded in UTF-8.\n\nMaoyan Movies URL: https://maoyan.com/films?showType=3\n\nRequirement: the Scrapy framework and its built-in item pipeline and selector features must be used; filtering page content with bs4 is not allowed.\n'''\n\n\nclass MaoyanspiderSpider(scrapy.Spider):\n    name = 'maoyanspider'\n    allowed_domains = ['maoyan.com']\n    start_urls = ['https://maoyan.com']\n\n    # def parse(self, response):\n    #     pass\n\n    def start_requests(self):\n        url = 'https://maoyan.com/films?showType=3'\n        yield scrapy.Request(url=url, callback=self.parse)\n\n    def parse(self, response):\n        self.items = []\n        html = etree.HTML(response.text)\n        dls = html.xpath('//*/dd')\n        for dl in dls:\n            name = dl.xpath(\n                './div[1]/div[2]/a/div/div[1]/span/text()')[0]\n            type = dl.xpath(\n                './div[1]/div[2]/a/div/div[2]/text()')[1].strip()\n            time = dl.xpath(\n                './div[1]/div[2]/a/div/div[4]/text()')[1].strip()\n            href = dl.xpath(\"./div[1]/a/@href\")[0]\n            item = SpidersItem()\n            item[\"name\"] = name\n            item[\"type\"] = type\n            item[\"time\"] = time\n            url = f'https://maoyan.com{href}'\n            yield scrapy.Request(\n                url=url, meta={\"item\": item}, callback=self.parse_detail)\n            # self.items.append(item)\n        # return self.items\n\n    def parse_detail(self, response):\n        item = response.meta[\"item\"]\n        html = etree.HTML(response.text)\n        short = html.xpath(\n            '//*[@id=\"app\"]/div/div[1]/div/div[3]/div[1]/div[1]/div[2]/span/text()')[0]\n        item[\"short\"] = short\n        yield item\n","sub_path":"week01/zy2/spiders/spiders/spiders/maoyanspider.py","file_name":"maoyanspider.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"403685836","text":"from common.logging_wrapper import setup_logging\nfrom predict_office.tensor_flow_model import TTensorFlowOfficeModel\nimport argparse\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--train-pool\", dest='train_pool')\n    
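# the remaining optional flags configure the model layout and the training loop\n    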
parser.add_argument(\"--model-folder\", dest='model_folder', required=False, default=\"model\")\n parser.add_argument(\"--bigrams-path\", dest='bigrams_path', required=False, default=\"office_ngrams.txt\")\n parser.add_argument(\"--epoch-count\", dest='epoch_count', required=False, type=int, default=10)\n parser.add_argument(\"--row-count\", dest='row_count', required=False, type=int)\n parser.add_argument(\"--dense-layer-size\", dest='dense_layer_size', required=False, type=int, default=128)\n parser.add_argument(\"--batch-size\", dest='batch_size', required=False, type=int, default=256)\n parser.add_argument(\"--worker-count\", dest='worker_count', required=False, type=int, default=3)\n parser.add_argument(\"--steps-per-epoch\", dest='steps_per_epoch', required=False, type=int, default=None)\n parser.add_argument(\"--device\", dest='device', required=False, default=\"/cpu:0\", help=\"can be /cpu:0 or /gpu:0\")\n return parser.parse_args()\n\n\ndef main():\n logger = setup_logging(log_file_name=\"predict_office_train.log\")\n args = parse_args()\n\n model = TTensorFlowOfficeModel(logger, args.bigrams_path, args.model_folder, create_model=True,\n work_pool_path=args.train_pool, row_count=args.row_count)\n model.train_tensorflow(args.dense_layer_size,\n epoch_count=args.epoch_count,\n batch_size=args.batch_size,\n workers_count=args.worker_count,\n steps_per_epoch=args.steps_per_epoch,\n device_name=args.device\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tools/predict_office/scripts/tf_office_train.py","file_name":"tf_office_train.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"510883831","text":"#!/usr/bin/env python3\n\n\"\"\"Split CIF timetable file into file chunks in the 'storage' directory by ID or if more than 65535 lines long\"\"\"\nimport sys\n\nN = 0\nM = 0\n\nKEY = None\nNAME = None\n\nfin = sys.stdin\nfout = None\nfor line in fin:\n N += 1\n ID = line[0:2]\n BUFFER = False\n\n if ID != KEY:\n if ID in ['BS', 'BX', 'CR', 'LI', 'LO', 'LT']:\n if NAME != 'PATH':\n NAME = 'PATH'\n BUFFER = True\n elif ID in ['TI', 'TA', 'TD']:\n if NAME != 'TR':\n NAME = 'TR'\n BUFFER = True\n else:\n NAME = ID\n BUFFER = True\n\n if (N > 65535 and ID == 'BS') or BUFFER:\n M += 1\n if fout:\n fout.close()\n filename = 'output/{}_{}'.format(NAME, str(M).zfill(3))\n fout = open(filename, 'w')\n N = 0\n fout.write(line)\n\n KEY = ID\n\nfout.close()\n","sub_path":"timetable/wtt-split.py","file_name":"wtt-split.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"129762967","text":"\"\"\"\nCreate and read/write to Sqlite database\nhttps://docs.python.org/3/library/sqlite3.html#sqlite3.Connection\nhttp://www.sqlitetutorial.net/sqlite-python/create-tables/\nhttps://www.pythoncentral.io/advanced-sqlite-usage-in-python/\n\"\"\"\n\nimport datetime\nimport sqlite3\nfrom sqlite3 import Error\n\nclass Lane_DB():\n def __init__(self):\n \"\"\" Initialize the lane database \"\"\"\n self.DB = sqlite3.connect('lane_closures.db', detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n\n self.cursor = self.DB.cursor()\n self.cursor.execute('''CREATE TABLE IF NOT EXISTS lanes(\n id INTEGER PRIMARY KEY,\n closure_id INTEGER,\n primary_street TEXT,\n date_closed_from TIMESTAMP,\n date_closed_to TIMESTAMP,\n boundaries TEXT,\n traffic_effect TEXT,\n published INTEGER\n )''')\n\n def write(self, 
lane_data):\n        \"\"\" lane_data (array):\n            [0] closure_id\n            [1] primary_street\n            [2] date_closed_from\n            [3] date_closed_to\n            [4] boundaries\n            [5] traffic_effect\n            [6] published (0/1)\"\"\"\n\n        if len(lane_data) == 7:\n            try:\n                self.cursor.execute('INSERT INTO lanes(closure_id, primary_street, date_closed_from, date_closed_to, boundaries, traffic_effect, published) VALUES (?,?,?,?,?,?,?)', lane_data)\n            except sqlite3.Error as error:\n                print(error)\n            finally:\n                self.DB.commit()","sub_path":"lib/db_store.py","file_name":"db_store.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"441491041","text":"__author__ = 'bbizic'\n\ndef addNumbers():\n    \"\"\"this is how you write documentation in Python\"\"\"\n    i = 2; #this is how you comment line\n    j = 4;\n    z = i + j;\n    print(str(z));\n\ndef addWithParam(firstNume, secondNume):\n    result = firstNume + secondNume;\n    return result;\n\ndef withDefParam(someParam = 2):\n    return someParam;\n\naddNumbers();\n\nprint(addWithParam(3,5));\n\nprint(addNumbers.__doc__);\nprint(dir());\n","sub_path":"PythonTest/funtions.py","file_name":"funtions.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"497124552","text":"# -*- coding: utf-8 -*- \n# Author: Tyler Lau\n\nimport numpy\nfrom constants import phon_to_feat\nfrom constants import n_feat\n\n# Using numpy, determine the Euclidean distance between the vectors, float\ndef dist(p, q):\n    '''Takes in the actual vector (determined by neural net) and determines distance from output vectors'''\n    return numpy.linalg.norm(numpy.array(p) - numpy.array(q))\n\ndef chunks(l, n):\n    '''\n    Yield successive n-sized chunks from list.\n    Apply list function to return list\n    '''\n    for i in range(0, len(l), n):\n        yield l[i:i+n]\n\ndef smooth(p):\n    '''\n    Error smoothing function\n    Takes the vector output by the neural network\n    Takes dictionary that converts suffix to relevant tuple\n    Converts those partitions to the closest phoneme vectors\n    '''\n    # Partition the input vector into individual phonemes\n    chunked_list = list(chunks(p, n_feat))\n\n    # Get list of phoneme tuples\n    phoneme_tuples = list(phon_to_feat.values())\n\n    output_tuple = ()\n\n    # For potential phoneme in tuple, find closest phoneme to it using dictionary keying distance to phoneme\n    for phoneme in chunked_list:\n        dist_from_realphon = {dist(phoneme, phoneme_tuples[i]): phoneme_tuples[i] for i in range(len(phoneme_tuples))}\n        smoothed_vector = min(dist_from_realphon.keys())\n        output_tuple += dist_from_realphon[smoothed_vector]\n\n    return output_tuple\n","sub_path":"Cleanup20160624/Main Code 9/smooth.py","file_name":"smooth.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"474454026","text":"from pycore.schema_gen import set_schema_enums\n\ncustomHeaderPrefix = \"X-OA-\"\n\ndef CH(headerName):\n    return customHeaderPrefix + headerName\n\nAppKeys = {\n    \"open-account\": \"130da3dc2a9bb1893d5bf85e3c67452d\",\n}\n\ndef get_ok_schema(data_schema={\"type\": \"object\" }):\n    schema = {\n        \"type\": \"object\",\n        \"properties\": {\n            \"data\": data_schema,\n            \"ok\": { \"type\": \"boolean\", \"enum\": [True] },\n            \"reason\": { \"type\": \"string\", \"enum\": [\"\"]}\n        },\n        \"required\": [ \"ok\",\"reason\", \"data\" ]\n    }\n    return schema\n\ndef get_fail_schema(reason=\"\"):\n    schema = {\n        \"type\": 
\"object\",\n \"properties\": {\n \"data\": {\"type\": \"object\"},\n \"ok\": { \"type\": \"boolean\", \"enum\": [False] },\n \"reason\": { \"type\": \"string\", \"enum\": [reason]}\n },\n \"required\": [ \"ok\",\"reason\" ]\n }\n return schema\n\ndef get_userinfo_detail_schema(enums=None):\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"id\": { \"type\": \"integer\" },\n \"uid\": { \"type\": \"string\"},\n \"tel\": { \"type\": \"string\"},\n \"nickname\": { \"type\": \"string\"},\n \"avatar\": { \"type\": \"string\" },\n \"sex\": { \"type\": \"integer\"},\n \"birthday\": { \"type\": \"string\" },\n \"userType\": { \"type\": \"integer\"},\n \"regInviteCode\": { \"type\": \"string\" },\n \"inviteCode\": { \"type\": \"string\"},\n \"createTime\": { \"type\": \"integer\"}\n },\n \"required\": [ \"id\", \"uid\", \"tel\", \"nickname\", \"avatar\", \"sex\", \"birthday\", \"userType\", \"regInviteCode\", \"inviteCode\", \"createTime\"]\n }\n if enums:\n set_schema_enums(schema['properties'], enums)\n return schema\n\n\ndef get_user_login_schema(enums=None):\n data_schema = {\n \"type\": \"object\",\n \"properties\": {\n \"token\": { \"type\": \"string\" },\n \"userInfo\": get_userinfo_detail_schema(enums=enums)\n },\n \"required\": [ \"token\", \"userInfo\" ]\n }\n schema = get_ok_schema(data_schema)\n return schema\n\ndef get_userinfo_schema(enums=None):\n data_schema = {\n \"type\": \"object\",\n \"properties\": {\n \"userInfo\": get_userinfo_detail_schema(enums=enums)\n },\n \"required\": [\"userInfo\" ]\n }\n schema = get_ok_schema(data_schema)\n return schema\n\ndef get_sms_code_schema():\n data_schema = {\n \"type\": \"object\",\n \"properties\": {\n \"code\": {\n \"type\": \"string\"\n }\n },\n \"required\": [\n \"code\"\n ]\n }\n schema = get_ok_schema(data_schema)\n return schema","sub_path":"test/pycore/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"338926730","text":"from common_fixtures import * # NOQA\nimport websocket as ws\nimport pytest\n\n\ndef get_logs(client):\n hosts = client.list_host(kind='docker', removed_null=True)\n assert len(hosts) > 0\n in_log = random_str()\n cmd = '/bin/bash -c \"echo {}; sleep 2\"'.format(in_log)\n c = client.create_container(imageUuid=TEST_IMAGE_UUID, command=cmd)\n c = client.wait_success(c)\n logs = c.logs()\n return logs, in_log, c\n\n\ndef test_logs_token(client):\n logs, in_log, c = get_logs(client)\n conn = ws.create_connection(logs.url + '?token='+logs.token)\n result = conn.recv()\n assert result is not None\n assert in_log in result\n\n delete_all(client, [c])\n\n\ndef test_logs_no_token(client):\n logs, _, c = get_logs(client)\n with pytest.raises(Exception) as excinfo:\n ws.create_connection(logs.url)\n assert 'Handshake status 401' in str(excinfo.value)\n delete_all(client, [c])\n\n\ndef test_host_api_garbage_token(client):\n logs, _, c = get_logs(client)\n with pytest.raises(Exception) as excinfo:\n ws.create_connection(logs.url+'?token=random.garbage.token')\n assert 'Handshake status 401' in str(excinfo.value)\n delete_all(client, [c])\n","sub_path":"tests/validation/cattlevalidationtest/core/test_logs_api.py","file_name":"test_logs_api.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"47306854","text":"# -*- coding: utf-8 -*- \n\"\"\"\nIMU Plugin\nCopyright (C) 2010-2012 Olaf Lüke 
\n\nimu_gl_widget.py: IMU OpenGL representation\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License \nas published by the Free Software Foundation; either version 2 \nof the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nGeneral Public License for more details.\n\nYou should have received a copy of the GNU General Public\nLicense along with this program; if not, write to the\nFree Software Foundation, Inc., 59 Temple Place - Suite 330,\nBoston, MA 02111-1307, USA.\n\"\"\"\n\nfrom PyQt4.QtOpenGL import QGLWidget\n\nfrom OpenGL.GL import GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_DEPTH_TEST, GL_LESS, GL_MODELVIEW, GL_POLYGON, GL_PROJECTION, GL_SMOOTH, glBegin, glClear, glClearColor, glClearDepth, glColor3f, glDepthFunc, glEnable, glEnd, glLoadIdentity, glMatrixMode, glPopMatrix, glPushMatrix, glShadeModel, glTranslatef, glVertex3fv, glViewport, glScalef, glMultMatrixf, GL_LINES, glLineWidth\nfrom OpenGL.GLU import gluPerspective\n\nclass IMUGLWidget(QGLWidget):\n def __init__(self, parent=None, name=None):\n QGLWidget.__init__(self, parent, name)\n self.parent = parent\n \n# col = parent.palette().background().color()\n# self.color_background = (col.redF(), col.greenF(), col.blueF(), 1.0)\n self.color_background = (0.85, 0.85, 0.85, 1.0)\n self.color_led_red = (1.0, 0.0, 0.0)\n self.color_led_green = (0.0, 1.0, 0.0)\n self.color_board = (0.0, 0.7, 0.0)\n self.color_connector = (0.0, 0.0, 0.0)\n \n self.vertices = (\n (-1.0,-1.0,-1.0),\n (1.0,-1.0,-1.0),\n (1.0,1.0,-1.0), \n (-1.0,1.0,-1.0), \n (-1.0,-1.0,1.0),\n (1.0,-1.0,1.0), \n (1.0,1.0,1.0), \n (-1.0,1.0,1.0)\n )\n \n self.pins = [(-0.8, -0.9), (-0.8, -0.65), (-0.8, -0.4), \n (-0.6, -0.9), (-0.6, -0.65), (-0.6, -0.4),\n (0.9, 0.8), (0.65, 0.8), (0.4, 0.8), (0.15, 0.8), \n (0.9, 0.6), (0.65, 0.6), (0.4, 0.6), (0.15, 0.6)]\n\n self.m = [[1, 0, 0, 0], \n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]\n\n self.rel_x = 0\n self.rel_y = 0\n self.rel_z = 0\n self.rel_w = 0\n \n self.save_orientation_flag = False\n \n def update(self, x, y, z, w):\n if self.save_orientation_flag:\n self.rel_x = x\n self.rel_y = y\n self.rel_z = z\n self.rel_w = w\n self.save_orientation_flag = False\n self.parent.orientation_label.setText(\"\")\n self.parent.orientation_label.setFixedHeight(0)\n \n # conjugate\n x = -x\n y = -y\n z = -z\n \n wn = w * self.rel_w - x * self.rel_x - y * self.rel_y - z * self.rel_z\n xn = w * self.rel_x + x * self.rel_w + y * self.rel_z - z * self.rel_y\n yn = w * self.rel_y - x * self.rel_z + y * self.rel_w + z * self.rel_x\n zn = w * self.rel_z + x * self.rel_y - y * self.rel_x + z * self.rel_w\n\n x = xn\n y = yn\n z = zn\n w = wn\n \n xx = x * x\n yy = y * y\n zz = z * z\n xy = x * y\n xz = x * z\n yz = y * z\n wx = w * x\n wy = w * y\n wz = w * z\n\n self.m = [[1.0 - 2.0*(yy + zz), 2.0*(xy - wz), 2.0*(xz + wy), 0.0],\n [2.0*(xy + wz), 1.0 - 2.0*(xx + zz), 2.0*(yz - wx), 0.0],\n [2.0*(xz - wy), 2.0*(yz + wx), 1.0 - 2.0*(xx + yy), 0.0],\n [0.0, 0.0, 0.0, 1.0]]\n \n self.updateGL()\n\n def initializeGL(self): \n glClearColor(*self.color_background) \n glClearDepth(1.0) \n glDepthFunc(GL_LESS) \n glEnable(GL_DEPTH_TEST) \n glShadeModel(GL_SMOOTH) \n \n glMatrixMode(GL_PROJECTION)\n glLoadIdentity() \n \n glMatrixMode(GL_MODELVIEW)\n \n def resizeGL(self, width, 
height):\n if height == 0: \n height = 1\n \n glViewport(0, 0, width, height) \n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(width)/float(height), 0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n \n # main drawing function. \n def paintGL(self):\n if self.parent.ipcon == None:\n return \n \n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glLoadIdentity()\n \n # Move Right And Into The Screen\n glTranslatef(0.0, 0.0, -5.0) \n\n glMultMatrixf(self.m)\n \n # Draw board\n glColor3f(*self.color_board)\n glColor3f(0.0, 0.0, 0.0)\n self.draw_cuboid(1.0, 1.0, 0.1)\n \n # Draw USB connector\n glColor3f(0.5, 0.51, 0.58)\n glPushMatrix()\n glTranslatef(0.0, -0.8, 0.2)\n self.draw_cuboid(0.2, 0.25, 0.1)\n glPopMatrix()\n \n # Draw button right\n glPushMatrix()\n glColor3f(0.5, 0.51, 0.58)\n glTranslatef(0.65, -0.95, 0.125)\n self.draw_cuboid(0.1, 0.075, 0.05)\n glColor3f(0.0, 0.0, 0.0)\n glTranslatef(0.0, -0.075, 0.0)\n self.draw_cuboid(0.05, 0.025, 0.045)\n glPopMatrix()\n \n # Draw button left\n glPushMatrix()\n glColor3f(0.5, 0.51, 0.58)\n glTranslatef(-0.65, -0.95, 0.125)\n self.draw_cuboid(0.1, 0.075, 0.05)\n glColor3f(0.0, 0.0, 0.0)\n glTranslatef(0.0, -0.075, 0.0)\n self.draw_cuboid(0.05, 0.025, 0.045)\n glPopMatrix()\n \n # Draw btb left top\n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslatef(-0.75, 0.0, 0.25)\n self.draw_cuboid(0.13, 0.5, 0.15)\n glPopMatrix()\n \n # Draw btb right top\n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslatef(0.75, 0.0, 0.25)\n self.draw_cuboid(0.13, 0.5, 0.15)\n glPopMatrix()\n \n # Draw btb left bottom\n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslatef(-0.75, 0.0, -0.2)\n self.draw_cuboid(0.13, 0.5, 0.1)\n glPopMatrix()\n \n # Draw btb right bottom\n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslatef(0.75, 0.0, -0.2)\n self.draw_cuboid(0.13, 0.5, 0.1)\n glPopMatrix()\n \n # Draw bricklet port left\n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslatef(-0.425, 0.9, -0.125)\n self.draw_cuboid(0.325, 0.1, 0.05)\n glPopMatrix()\n \n \n # Draw bricklet port right\n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslatef(0.425, 0.9, -0.125)\n self.draw_cuboid(0.325, 0.1, 0.05)\n glPopMatrix()\n \n # Draw Axis\n glPushMatrix()\n glTranslatef(-1.2, -1.2, -0.3)\n glLineWidth(5.0)\n \n glBegin(GL_LINES)\n glColor3f(1,0,0) # x axis is red\n glVertex3fv((0,0,0))\n glVertex3fv((2,0,0))\n glColor3f(0,0.5,0) # y axis is green\n glVertex3fv((0,0,0))\n glVertex3fv((0,2,0))\n glColor3f(0,0,1) # z axis is blue\n glVertex3fv((0,0,0))\n glVertex3fv((0,0,2))\n glEnd()\n \n glPopMatrix()\n \n def polygon(self, a, b, c, d):\n # draw a polygon\n glBegin(GL_POLYGON)\n glVertex3fv(self.vertices[a])\n glVertex3fv(self.vertices[b])\n glVertex3fv(self.vertices[c])\n glVertex3fv(self.vertices[d])\n glEnd()\n\n def cube(self):\n # map vertices to faces\n self.polygon(0, 3, 2, 1)\n self.polygon(2, 3, 7, 6)\n self.polygon(4, 7, 3, 0)\n self.polygon(1, 2, 6, 5)\n self.polygon(7, 4, 5, 6)\n self.polygon(5, 4, 0, 1)\n\n def draw_cuboid(self, x, y, z):\n glPushMatrix()\n glScalef(x, y, z) # size cuboid\n self.cube()\n glPopMatrix()\n\n def save_orientation(self):\n self.save_orientation_flag = True\n","sub_path":"src/brickv/plugin_system/plugins/imu/imu_gl_widget.py","file_name":"imu_gl_widget.py","file_ext":"py","file_size_in_byte":8417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"34150489","text":"from importlib.machinery import SourceFileLoader\nimport 
unittest\n\nsalt_test_case = SourceFileLoader('salt_test_case', \"salt_test_case.py\").load_module()\ndocker = SourceFileLoader('docker', '../_modules/paas_docker.py').load_module()\n\n\nclass Testinstance(unittest.TestCase, salt_test_case.SaltTestCase):\n\n def setUp(self):\n self.initialize_mocks()\n self.instance = docker\n\n self.mock_pillar('data/paas_docker.yaml')\n\n self.mock_grains()\n self.grains['id'] = 'egladil'\n\n def test_get_image(self):\n container = {\n \"image\": \"foo\",\n \"version\": \"42\"\n }\n\n self.assertEqual(\"foo:42\", docker.get_image(\"not_foo\", container))\n\n def test_get_image_without_version(self):\n container = {\n \"image\": \"foo\",\n }\n\n self.assertEqual(\"foo\", docker.get_image(\"not_foo\", container))\n\n def test_get_image_without_image(self):\n container = {\n \"version\": \"42\"\n }\n\n self.assertEqual(\"not_foo:42\", docker.get_image(\"not_foo\", container))\n\n def test_get_image_without_anything(self):\n self.assertEqual(\"not_foo\", docker.get_image(\"not_foo\", {}))\n\n def test_get_image_with_numeric_version(self):\n container = {\n \"image\": \"foo\",\n \"version\": 2.5\n }\n\n self.assertEqual(\"foo:2.5\", docker.get_image(\"not_foo\", container))\n\n def test_get_subnets(self):\n expected = ['172.18.1.0/24', '172.18.2.0/24', '172.17.0.0/16']\n\n self.assertEqual(expected, docker.get_subnets())\n\n def test_get_subnets_when_none_are_defined(self):\n # Only the default Docker one\n expected = ['172.17.0.0/16']\n\n self.grains['id'] = 'voidserver'\n self.assertEqual(expected, docker.get_subnets())\n","sub_path":"_tests/modules/test_paas_docker.py","file_name":"test_paas_docker.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"258591548","text":"#!/usr/bin/env python\n\nimport os\nimport json\nimport sys\n\ndef print_usage():\n print(\"USAGE: load_envvars.py DEPLOY_TO\")\n exit(1)\n\ndef get_script_path():\n return os.path.dirname(os.path.realpath(__file__))\n\ndef main():\n if len(sys.argv) < 2:\n print_usage()\n\n env = sys.argv[1]\n\n zsf_path = os.path.join(get_script_path(), \"zappa_settings.json\")\n with open(zsf_path) as zsf:\n settings = json.load(zsf)\n\n environment_variables = settings[env]['aws_environment_variables']\n for envvar in environment_variables:\n print(\"export {}='{}'\".format(envvar, environment_variables[envvar]))\n\nif __name__ == \"__main__\":\n main()","sub_path":"load_envvars.py","file_name":"load_envvars.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"383845357","text":"from math import sqrt\nimport numpy as np\nfrom history import History\n\ndef universal_similar_triangles_method(oracle, prox, primal_dual_oracle,\n t_start, L_init = None, max_iter = 1000,\n eps = 1e-5, eps_abs = None, stop_crit = 'dual_gap_rel',\n verbose_step = 100, verbose = False, save_history = False):\n if stop_crit == 'dual_gap_rel':\n def crit():\n return duality_gap <= eps * duality_gap_init\n elif stop_crit == 'dual_gap':\n def crit():\n return duality_gap <= eps_abs\n elif stop_crit == 'max_iter':\n def crit():\n return it_counter == max_iter\n elif callable(stop_crit):\n crit = stop_crit\n else:\n raise ValueError(\"stop_crit should be callable or one of the following names: \\\n 'dual_gap', 'dual_gap_rel', 'max iter'\")\n \n L_value = L_init if L_init is not None else np.linalg.norm(oracle.grad(t_start))\n \n A_prev = 0.0\n 
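# A_prev will accumulate the step sizes alpha of the similar-triangles scheme; the\n    # points initialized below (u_prev, t_prev) are its auxiliary and output iterates,\n    # and L_value is a local Lipschitz estimate adapted by the inner loop (doubled on\n    # a failed check, halved after each outer iteration).\n    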
y_start = u_prev = t_prev = np.copy(t_start)\n A = u = t = y = None\n \n grad_sum = None\n grad_sum_prev = np.zeros(len(t_start))\n\n flows_weighted = primal_dual_oracle.get_flows(y_start) \n primal, dual, duality_gap_init, state_msg = primal_dual_oracle(flows_weighted, y_start)\n if save_history:\n history = History('iter', 'primal_func', 'dual_func', 'dual_gap', 'inner_iters')\n history.update(0, primal, dual, duality_gap_init, 0)\n if verbose:\n print(state_msg)\n if eps_abs is None:\n eps_abs = eps * duality_gap_init\n \n success = False\n inner_iters_num = 0\n \n for it_counter in range(1, max_iter+1):\n while True:\n inner_iters_num += 1\n \n alpha = 0.5 / L_value + sqrt(0.25 / L_value**2 + A_prev / L_value)\n A = A_prev + alpha\n\n y = (alpha * u_prev + A_prev * t_prev) / A\n grad_y = oracle.grad(y)\n flows = primal_dual_oracle.get_flows(y) #grad() is called here\n grad_sum = grad_sum_prev + alpha * grad_y\n u = prox(grad_sum / A, y_start, 1.0 / A)\n t = (alpha * u + A_prev * t_prev) / A\n\n left_value = (oracle.func(y) + np.dot(grad_y, t - y) + \n 0.5 * alpha / A * eps_abs) - oracle.func(t)\n right_value = - 0.5 * L_value * np.sum((t - y)**2)\n if left_value >= right_value:\n break\n else:\n L_value *= 2\n \n A_prev = A\n L_value /= 2\n \n t_prev = t\n u_prev = u\n grad_sum_prev = grad_sum\n flows_weighted = (flows_weighted * (A - alpha) + flows * alpha ) / A\n \n primal, dual, duality_gap, state_msg = primal_dual_oracle(flows_weighted, t)\n if save_history:\n history.update(it_counter, primal, dual, duality_gap, inner_iters_num)\n if verbose and (it_counter % verbose_step == 0):\n print('\\nIterations number: {:d}'.format(it_counter))\n print('Inner iterations number: {:d}'.format(inner_iters_num))\n print(state_msg, flush = True)\n if crit():\n success = True\n break\n \n result = {'times': t, 'flows': flows_weighted,\n 'iter_num': it_counter,\n 'res_msg': 'success' if success else 'iterations number exceeded'}\n if save_history:\n result['history'] = history.dict\n if verbose:\n print('\\nResult: ' + result['res_msg'])\n print('Total iters: ' + str(it_counter))\n print(state_msg)\n print('Oracle elapsed time: {:.0f} sec'.format(oracle.time))\n return result\n\n#print('Dijkstra elapsed time: {:.0f} sec'.format(oracle.auto_oracles_time))\n\n#criteria: stable dynamic 'dual_threshold' AND 'primal_threshold', 'dual_rel' AND 'primal_rel'. 
\n\n#beckman : + 'dual_gap_rel', 'dual_gap_threshold', 'primal_threshold', 'primal_rel'\n\n#criteria: 'star_solution_residual',\n\n#practice: 'dual_rel'\n\n\n# if crit_name == 'dual_gap_rel':\n# def crit():\n# nonlocal duality_gap, duality_gap_init, eps\n# return duality_gap < eps * duality_gap_init\n# if crit_name == 'dual_rel':\n# def crit():\n# nonlocal dual_func_history, eps\n# l = len(dual_func_history)\n# return dual_func_history[l // 2] - dual_func_history[-1] \\\n# < eps * (dual_func_history[0] - dual_func_history[-1])\n# if crit_name == 'primal_rel':\n# def crit():\n# nonlocal primal_func_history, eps\n# l = len(primal_func_history)\n# return primal_func_history[l // 2] - primal_func_history[-1] \\\n# < eps * (primal_func_history[0] - primal_func_history[-1])","sub_path":"Stable Dynamic & Beckman/grad_methods/universal_similar_triangles_method.py","file_name":"universal_similar_triangles_method.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"632222681","text":"from typing import List, Optional\nfrom cloudrail.knowledge.context.gcp.resources.binary_authorization.gcp_binary_authorization_policy import GcpClusterContainerBinaryAuthorizationPolicy, \\\n GcpBinaryAuthorizationAdmissionRuleType, GcpBinaryAuthorizationAdmissionRule, GcpBinaryAuthorizationAdmissionEvaluationMode, \\\n GcpBinaryAuthorizationAdmissionEnforcementMode\nfrom cloudrail.knowledge.context.gcp.resources.constants.gcp_resource_type import GcpResourceType\nfrom cloudrail.knowledge.context.gcp.resources_builders.terraform.base_gcp_terraform_builder import BaseGcpTerraformBuilder\nfrom cloudrail.knowledge.utils.enum_utils import enum_implementation\n\n\nclass BinaryAuthorizationPolicyBuilder(BaseGcpTerraformBuilder):\n\n def do_build(self, attributes: dict) -> GcpClusterContainerBinaryAuthorizationPolicy:\n cluster_admission_rules: List[GcpBinaryAuthorizationAdmissionRule] = []\n for rule in self._get_known_value(attributes, 'cluster_admission_rules', []):\n cluster_admission_rules.append(self._build_admission_rule(rule, GcpBinaryAuthorizationAdmissionRuleType.CLUSTER, rule['cluster']))\n global_policy_evaluation_mode_enabled = self._get_known_value(attributes, 'global_policy_evaluation_mode') == 'ENABLE'\n return GcpClusterContainerBinaryAuthorizationPolicy(default_admission_rule=self._build_admission_rule(attributes['default_admission_rule'][0],\n GcpBinaryAuthorizationAdmissionRuleType.DEFAULT),\n cluster_admission_rules=cluster_admission_rules,\n global_policy_evaluation_mode_enabled=global_policy_evaluation_mode_enabled)\n\n def get_service_name(self) -> GcpResourceType:\n return GcpResourceType.GOOGLE_BINARY_AUTHORIZATION_POLICY\n\n @classmethod\n def _build_admission_rule(cls, attributes: dict, rule_type: GcpBinaryAuthorizationAdmissionRuleType, cluster_id: Optional[str] = None):\n return GcpBinaryAuthorizationAdmissionRule(admission_rule_type=rule_type,\n evaluation_mode=enum_implementation(GcpBinaryAuthorizationAdmissionEvaluationMode,\n cls._get_known_value(attributes, 'evaluation_mode')),\n enforcement_mode=enum_implementation(GcpBinaryAuthorizationAdmissionEnforcementMode,\n cls._get_known_value(attributes, 'enforcement_mode')),\n 
cluster_id=cluster_id)\n","sub_path":"cloudrail/knowledge/context/gcp/resources_builders/terraform/binary_authorization_policy_builder.py","file_name":"binary_authorization_policy_builder.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"430759180","text":"from keras.models import Sequential\nfrom keras.layers import convolutional\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout\nfrom keras.optimizers import SGD, Adam\nimport keras.backend as K\nfrom keras.utils import np_utils\nfrom keras.models import model_from_json\nfrom keras.utils.visualize_util import plot\n\nimport numpy as np\n\nclass PolicyNetWork(object):\n def __init__(self):\n self.batch_size = 1\n self.nb_classes = 64\n self.nb_epoch = 1\n self.nb_layer = 7\n\n def create_network(self,):\n network = Sequential()\n\n # create first layer\n network.add(convolutional.Convolution2D(\n nb_filter=128,\n nb_row=5,\n nb_col=5,\n input_shape=(1,8,8),\n init=\"uniform\",\n activation=\"relu\",\n border_mode=\"same\"))\n\n # create all other layers\n for i in range(2, self.nb_layer):\n network.add(convolutional.Convolution2D(\n nb_filter=128,\n nb_row=3,\n nb_col=3,\n init=\"uniform\",\n activation=\"relu\",\n border_mode=\"same\"))\n\n # the last layer maps each feature to a number\n network.add(convolutional.Convolution2D(\n nb_filter=1,\n nb_row=1,\n nb_col=1,\n init=\"uniform\",\n border_mode=\"same\"))\n\n # reshape output to be board x board\n network.add(Flatten())\n network.add(Activation(\"relu\"))\n network.add(Dense(self.nb_classes))\n\n #softmax makes it into a probability distribution\n network.add(Activation(\"softmax\"))\n\n sgd = SGD(lr=.03, decay=.0001)\n adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n network.compile(loss=\"categorical_crossentropy\",\n optimizer=adam,\n metrics=[\"accuracy\"])\n return network\n\nif __name__ == \"__main__\":\n\n pcnn = PolicyNetWork()\n model = pcnn.create_network()\n json_string = model.to_json()\n plot(model, to_file='model.png')\n open('pcnn.json', 'w').write(json_string)\n print(\"Checking json file...\")\n\n model = model_from_json(open('pcnn.json').read())\n model.load_weights('/Users/kento_watanabe/Desktop/work/Data_of_Othello/weights_24.hdf5')\n\n print(\"OK!\")\n\n from othello import Othello\n board = np.asarray(\n [[0,0,0,0,0,0,0,0,],\n [0,0,0,0,0,0,0,0,],\n [0,0,0,0,0,0,0,0,],\n [0,0,0,2,1,0,0,0,],\n [0,0,0,1,2,0,0,0,],\n [0,0,0,0,0,0,0,0,],\n [0,0,0,0,0,0,0,0,],\n [0,0,0,0,0,0,0,0,]\n ])\n\n board = board.reshape((1,1,8,8))\n argsort = np.argsort(model.predict(board))\n\n print(argsort[0][::-1])\n","sub_path":"BetaOthello/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"321713361","text":"import logging\nfrom collections import namedtuple\nimport pytest\n\nfrom actions.card_actions import CardActions\nfrom actions.payload_generator import PayloadGenerator\nfrom actions.user_actions import UserActions\nfrom base.base_test import BaseTest\nfrom utils.utils_helper import UtilsHelper\nfrom verifications.card_verifications import CardVerifications\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestCardCreation(BaseTest):\n\n @pytest.fixture(scope='module')\n def resources(self):\n # Create user\n user_client = UserActions()\n user_client.create_user(PayloadGenerator.user_payload())\n\n # Create card 
product\n card_client = CardActions()\n card_client.create_card_product(\n PayloadGenerator.card_product_payload())\n\n Data = namedtuple('Data', 'user_client, user_token, card_client,'\n 'card_product_token')\n\n return Data(user_client=user_client,\n user_token=user_client.user_token,\n card_client=card_client,\n card_product_token=card_client.product_token)\n\n @pytest.mark.all_test\n @pytest.mark.smoke_test\n # @pytest.mark.skip(reason=\"Test Disable\")\n def test_create_card_success(self, resources):\n \"\"\"\n Test create a new card successfully\n \"\"\"\n #\n # ================ CONFIGURATION ================\n #\n card_details = PayloadGenerator.card_payload(\n user_token=resources.user_token,\n card_product_token=resources.card_product_token)\n\n #\n # ================ ACTION ================\n #\n card = resources.card_client.create_card(card_details)\n\n #\n # ================ VERIFICATION ================\n #\n CardVerifications.verify_card_creation_success(card, resources)\n\n @pytest.mark.all_test\n # @pytest.mark.skip(reason=\"Test Disable\")\n def test_create_multiple_cards_same_user_product_success(self, resources):\n \"\"\"\n Test create multiple cards for same user and card product successfully\n \"\"\"\n #\n # ================ CONFIGURATION ================\n #\n card_details = PayloadGenerator.card_payload(\n user_token=resources.user_token,\n card_product_token=resources.card_product_token)\n\n #\n # ================ ACTION ================\n #\n card1 = resources.card_client.create_card(card_details)\n card2 = resources.card_client.create_card(card_details)\n\n #\n # ================ VERIFICATION ================\n #\n CardVerifications.verify_multiple_cards_same_user_product_success(\n card1, card2)\n\n @pytest.mark.all_test\n # @pytest.mark.skip(reason=\"Test Disable\")\n def test_create_personalized_card_with_name_success(self, resources):\n \"\"\"\n Test create a new personalized card with custom name successfully\n \"\"\"\n #\n # ================ CONFIGURATION ================\n #\n custom_name = \"custom_name_\" + UtilsHelper.time_stamp()\n fulfillment_details = {\n \"card_personalization\": {\n \"text\": {\n \"name_line_1\": {\n \"value\": custom_name\n }\n }\n }\n }\n\n card_details = PayloadGenerator.card_payload(\n user_token=resources.user_token,\n card_product_token=resources.card_product_token,\n fulfillment=fulfillment_details)\n\n #\n # ================ ACTION ================\n #\n card = resources.card_client.create_card(card_details)\n\n #\n # ================ VERIFICATION ================\n #\n CardVerifications.verify_card_creation_custom_name_success(card,\n custom_name)\n\n @pytest.mark.all_test\n # @pytest.mark.skip(reason=\"Test Disable\")\n def test_create_card_without_user_token_fail(self, resources):\n \"\"\"\n Test create a new card without user token unsuccessfully\n \"\"\"\n #\n # ================ CONFIGURATION ================\n #\n card_details = PayloadGenerator.card_payload(\n user_token='',\n card_product_token=resources.card_product_token)\n\n #\n # ================ ACTION ================\n #\n card = resources.card_client.create_card(card_details)\n\n #\n # ================ VERIFICATION ================\n #\n CardVerifications.verify_no_user_token_card_creation_fail(card)\n\n @pytest.mark.all_test\n # @pytest.mark.skip(reason=\"Test Disable\")\n def test_create_card_with_invalid_product_token_fail(self, resources):\n \"\"\"\n Test create a new card with invalid product token unsuccessfully\n \"\"\"\n #\n # ================ 
CONFIGURATION ================\n #\n card_details = PayloadGenerator.card_payload(\n user_token=resources.user_token,\n card_product_token='invalid_token')\n\n #\n # ================ ACTION ================\n #\n card = resources.card_client.create_card(card_details)\n\n #\n # ================ VERIFICATION ================\n #\n CardVerifications.verify_invalid_product_token_card_creation_fail(card)\n","sub_path":"tests/test_card_creation.py","file_name":"test_card_creation.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"633242032","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n#import training data from train.csv divide into x and y\ndef train_data():\n\ta= pd.read_csv(\"train.csv\")\n\tdata= np.array(a)\n\ty=data[:,0]\n\tx=data[:,1:]\n\ty=np.reshape(y,(42000,1))\n\ty_data=np.zeros((y.shape[0],10))\n\tfor i in xrange(42000):\n\t\tind=y[i]\n\t\ty_data[i][ind]=1\n\treturn x,y_data\n\n#import test data from test.csv\ndef test_data():\n\tb= pd.read_csv(\"test.csv\")\n\tdata= np.array(b)\n\treturn data\n\ndef image(x):\n\tplt.imshow(x,cmap=plt.get_cmap('gray'))\n\tplt.show()\n#show the grayscale pixels\ndef show(x):\n\tx=np.reshape(x,(28,28))\n\tplt.imshow(x,cmap=plt.get_cmap('gray'))\n\tplt.show()\n\n#sigmoid function\ndef sigmoid(z):\n\treturn 1/(1+np.e**(-z))\n\n#derivative sigmoid function\ndef der_sigmoid(z):\n\ta=sigmoid(z)\n\treturn a*(1-a)","sub_path":"dat.py","file_name":"dat.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"17403575","text":"#!/usr/bin/python3\n\nfrom flask import Flask, abort, redirect, url_for, render_template\nimport psycopg2\nimport sys\n\napp = Flask(__name__)\n\nconn = psycopg2.connect(\n database='safetydome',\n user='flask',\n password='flask',\n host='localhost'\n )\ncur = conn.cursor()\n\n\nclass Battle():\n \"\"\"Battle object used to store battle combatants and statistics\"\"\"\n __tablename__ = 'Battle'\n\n def __init__(\n self,\n id,\n one_id,\n two_id,\n start=None,\n stop=None,\n one_name=None,\n two_name=None\n ):\n self.id = id\n self.one_id = one_id\n self.two_id = two_id\n self.one_name = one_name\n self.two_name = two_name\n self.start = start\n self.stop = stop\n\n\nclass Combatant():\n \"\"\"Combatant class stores fighter attributes\"\"\"\n __tablename__ = 'combatant'\n\n def __init__(\n self,\n comb_id,\n comb_name,\n comb_species,\n atk_name=None,\n atk_type=None,\n atk_mn_dmg=None,\n atk_mx_dmg=None,\n atk_spd=None\n ):\n self.id = comb_id\n self.name = comb_name\n self.species = comb_species\n self.attack = []\n self.attack.append(atk_name)\n self.attack_type = []\n self.attack_type.append(atk_type)\n self.min_dmg = []\n self.min_dmg.append(atk_mn_dmg)\n self.max_dmg = []\n self.max_dmg.append(atk_mx_dmg)\n self.speed = []\n self.speed.append(atk_spd)\n\n\n@app.route('/')\ndef index_proc():\n \"\"\"Default index.html function. 
Renders initial page\"\"\"\n    return render_template('index.html')\n\n\n@app.route('/combatant')\ndef combatant_proc():\n    \"\"\"Function that processes the list of combatants\"\"\"\n    query_combatant = \"SELECT combatant.id, combatant.name, species.name \"\n    query_combatant += \"FROM public.combatant, public.species \"\n    query_combatant += \"WHERE combatant.species_id = species.id \"\n    query_combatant += \"ORDER by combatant.name\"\n\n    comb_objs = []\n\n    # Connection to retrieve combatants\n    try:\n        cur.execute(query_combatant)\n        data_combatants = cur.fetchall()\n    except Exception as e:\n        failure = \"\\n -Failed to query combatant data. {0}\"\n        print(failure.format(e), file=sys.stderr)\n        abort(404)\n\n    for entry in data_combatants:\n        current = Combatant(entry[0], entry[1], entry[2])\n        comb_objs.append(current)\n\n    return render_template('combatants.html', combatants=comb_objs)\n\n\n@app.route('/combatant/<id>')\ndef fighter_proc(id=None):\n    \"\"\"Processes the request for a specific combatant id\"\"\"\n    if (str(id).isnumeric() is not True):\n        abort(404)\n    query_fighter = \"SELECT combatant.id, combatant.name, species.name, \"\n    query_fighter += \"attack.name, attack.type, attack.min_dmg, \"\n    query_fighter += \"attack.max_dmg, attack.speed \"\n    query_fighter += \"FROM public.combatant, public.species, \"\n    query_fighter += \"public.species_attack, public.attack \"\n    query_fighter += \"WHERE combatant.species_id = species.id AND \"\n    query_fighter += \"species.id = species_attack.species_id AND \"\n    query_fighter += \"species_attack.attack_id = attack.id AND combatant.id = \"\n    query_fighter += str(id)\n\n    # Connection to retrieve fighter data\n    try:\n        cur.execute(query_fighter)\n        data_fighter = cur.fetchall()\n    except Exception as e:\n        failure = \"\\n -Failed to query fighter data. 
{0}\"\n        print(failure.format(e), file=sys.stderr)\n        abort(404)\n\n    # Create initial Combatant Object\n    fighter = Combatant(\n        data_fighter[0][0],\n        data_fighter[0][1],\n        data_fighter[0][2]\n    )\n    for data in data_fighter:\n        fighter.attack.append(data[3])\n        fighter.attack_type.append(data[4])\n        fighter.min_dmg.append(data[5])\n        fighter.max_dmg.append(data[6])\n        fighter.speed.append(data[7])\n\n    return render_template('fighter.html', fighter=fighter)\n\n\n@app.route('/battle')\n@app.route('/battle/<id>')\n@app.route('/battle/<id>-<id2>')\ndef battle_proc(id=None, id2=None):\n    \"\"\"Function handles all battle html page calls\"\"\"\n    # Single ID passed to /battle\n    if (id is not None and id2 is None):\n        if (str(id).isnumeric() is not True):\n            abort(404)\n        query_fights = \"SELECT fight.id, fight.combatant_one, \"\n        query_fights += \"fight.combatant_two, fight.winner, fight.start, \"\n        query_fights += \"fight.finish FROM public.fight WHERE fight.id = \"\n        query_fights += str(id)\n\n    # Two ID's passed to /battle\n    elif (id is not None and id2 is not None):\n        if (str(id).isnumeric() is not True) or (str(id2).isnumeric() is not True):\n            abort(404)\n        query_fights = \"SELECT fight.id, fight.combatant_one, \"\n        query_fights += \"fight.combatant_two, fight.winner, fight.start, \"\n        query_fights += \"fight.finish FROM public.fight WHERE combatant_one = \"\n        query_fights += str(id)\n        query_fights += \" AND combatant_two = \"\n        query_fights += str(id2)\n        query_fights += \" OR combatant_one = \"\n        query_fights += str(id2)\n        query_fights += \" AND combatant_two = \"\n        query_fights += str(id)\n\n    # No ID's passed to /battle\n    else:\n        query_fights = \"SELECT fight.id, fight.combatant_one, \"\n        query_fights += \"fight.combatant_two, fight.winner, fight.start, \"\n        query_fights += \"fight.finish FROM public.fight\"\n\n    # Connection to retrieve fight data\n    try:\n        cur.execute(query_fights)\n        data_fight = cur.fetchall()\n    except Exception as e:\n        failure = \"\\n -Failed to query fight data. {0}\"\n        print(failure.format(e), file=sys.stderr)\n        abort(404)\n\n    # Create array of battles\n    fights = []\n    for data in data_fight:\n        new = Battle(data[0], data[1], data[2], data[4], data[5])\n\n        # Retrieve Fighter one\n        query_one = \"SELECT combatant.name FROM public.combatant WHERE \"\n        query_one += \"combatant.id = \"\n        query_one += str(new.one_id)\n\n        try:\n            cur.execute(query_one)\n            name_one = cur.fetchall()\n        except Exception as e:\n            print(\"\\n -Failed to query name. {0}\".format(e), file=sys.stderr)\n\n        new.one_name = name_one[0][0]\n\n        # Retrieve Fighter two\n        query_two = \"SELECT combatant.name FROM public.combatant WHERE \"\n        query_two += \"combatant.id = \"\n        query_two += str(new.two_id)\n\n        try:\n            cur.execute(query_two)\n            name_two = cur.fetchall()\n        except Exception as e:\n            print(\"\\n -Failed to query name. 
{0}\".format(e), file=sys.stderr)\n\n new.two_name = name_two[0][0]\n\n if data[3] == 'One':\n new.winner = new.one_name\n elif data[3] == 'Two':\n new.winner = new.two_name\n else:\n new.winner = 'Tie'\n fights.append(new)\n\n if id is not None:\n return render_template('battle_data.html', battle=fights[0])\n else:\n return render_template('battle.html', fights=fights)\n\n\n@app.route('/results')\ndef results_proc():\n \"\"\"Queries the database for win results\"\"\"\n query_win = \"SELECT id, count(*) as wins FROM (\"\n query_win += \"SELECT CASE \"\n query_win += \"WHEN winner = 'One' THEN combatant_one \"\n query_win += \"WHEN winner = 'Two' THEN combatant_two \"\n query_win += \"end AS id, COUNT(*) AS wins FROM fight \"\n query_win += \"GROUP BY id \"\n query_win += \"ORDER BY wins desc) AS wins WHERE ID IS NOT NULL GROUP BY \"\n query_win += \"id ORDER BY wins DESC\"\n\n # Connection to retrieve fighter data\n try:\n cur.execute(query_win)\n data_win = cur.fetchall()\n except Exception as e:\n failure = \"\\n -Failed to query fighter data. {0}\"\n print(failure.format(e), file=sys.stderr)\n abort(404)\n\n # Number top ranked and create list of queried results\n rank = 1\n combatants = []\n for win in data_win:\n # Retrieve Fighter one\n query_name = \"SELECT combatant.name, species.name FROM \"\n query_name += \"public.combatant, public.species WHERE \"\n query_name += \"combatant.species_id = species.id and combatant.id = \"\n query_name += str(win[0])\n\n try:\n cur.execute(query_name)\n name = cur.fetchall()\n except Exception as e:\n failure = \"\\n -Failed to query fighter data. {0}\"\n print(failure.format(e), file=sys.stderr)\n abort(404)\n\n current = Combatant(win[0], name[0][0], win[1])\n current.rank = rank\n current.wins = win[1]\n combatants.append(current)\n rank += 1\n\n return render_template('results.html', combatants=combatants)\n\n\nif __name__ == '__main__':\n app.run(port=8047)\n cur.close()\n","sub_path":"safetydome.py","file_name":"safetydome.py","file_ext":"py","file_size_in_byte":8899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"86921177","text":"from collections import namedtuple\n\nContact = namedtuple('Contact', 'first last age email')\n\nrecords = [\n Contact('John', 'Smith', 43, 'jsbrony@yahoo.com'),\n Contact('Ellen', 'James', 32, 'jamestel@google.com'),\n Contact('Sally', 'Edwards', 36, 'steclone@yahoo.com'),\n Contact('Keith', 'Cramer', 29, 'kcramer@sintech.com')\n]\nrecords.sort(key=lambda one_rec: one_rec.age, reverse=True)\n\nfor record in records:\n print(record.last, record.age)\n","sub_path":"Optum Tech/student_files/ch01_overview/07_namedtuples.py","file_name":"07_namedtuples.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"616181627","text":"from tkinter import StringVar, messagebox\nfrom tkinter.constants import SUNKEN\nimport tkinter.font as tkFont\nimport tkinter as tk\n\nfrom ..config import *\nfrom ..TopLevelObject import *\n\n#import Engine\nimport Engine.config\nimport Engine.___Engine\nfrom Engine.___Engine import Engine\nfrom Engine.config import *\n\nclass Header(TopLevelObject):\n def __init__(self, master):\n super().__init__(master)\n\n self.engine = None\n self.engine_stopped = True\n self.engine_text = tk.StringVar()\n self.engine_text.set('Start engine')\n\n def Grid(self, **options):\n super().Grid(options)\n\n leftFrame = tk.Frame()\n self.bnEngine = 
tk.Button(leftFrame, textvariable = self.engine_text, command=self.Toggle_Start_Engine, bg = 'orange', fg = 'navy blue').grid(row=1, column=1, sticky='W')\n self.buyCrypto = tk.Button(leftFrame, text='Buy Crypto', bg=Color.BG2.value, fg=Color.HighFG.value).grid(row=1, column=2, sticky='W')\n self.markets = tk.Button(leftFrame, text='Markets').grid(row=1, column=3, sticky='W')\n self.trade = tk.Button(leftFrame, text='Trade').grid(row=1, column=4, sticky='W')\n self.derivatives = tk.Button(leftFrame, text='Derivatives').grid(row=1, column=5, sticky='W')\n self.finance = tk.Button(leftFrame, text='Finance').grid(row=1, column=6, sticky='W')\n leftFrame.grid(row=1, column=1, sticky='W')\n \n rightFrame = tk.Frame()\n self.wallet = tk.Button(rightFrame, text='Wallet').grid(row=1, column=1, sticky='E')\n self.orders = tk.Button(rightFrame, text='Orders').grid(row=1, column=2, sticky='E')\n self.account = tk.Button(rightFrame, text='Account').grid(row=1, column=3, sticky='E')\n self.language = tk.Button(rightFrame, text='English').grid(row=1, column=4, sticky='E')\n self.currency = tk.Button(rightFrame, text='USD').grid(row=1, column=5, sticky='E')\n self.theme = tk.Button(rightFrame, text='***').grid(row=1, column=6, sticky='E')\n rightFrame.grid(row=1, column=2, sticky='E')\n\n\n def Pack(self, **options):\n super().Pack(options)\n\n #tk.Button(self.master, text='Header button1').pack(side='top')\n \n self.button1 = tk.Button(self.frame, text='Header button top')\n self.button1.pack(side='top')\n\n self.button2 = tk.Button(self.frame, text='Header button bottom')\n self.button2.pack(side='bottom')\n \n def Toggle_Start_Engine(self):\n if self.engine_stopped:\n if self.engine is None:\n self.engine = Engine() # A TopLevelObject property.\n TopLevelObject.engine = self.engine # so engine is now shared between all TopLevelObjects.\n self.engine.Start(Config['structure'], Config['timing'])\n self.engine_stopped = False\n self.engine_text.set('Stop engine')\n else:\n stopped = self.engine.Stop()\n if stopped:\n self.engine_stopped = True\n self.engine_text.set('Start engine')\n return\n","sub_path":"GUI/Header/Header.py","file_name":"Header.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"116363742","text":"\"\"\"Setup script for the package\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom rpncalc import __version__\n\nwith open(\"README.md\") as readme_file:\n README = readme_file.read()\n\n\nsetup(\n name=\"rpn\",\n version=__version__,\n description=\"RPN calc\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n author=\"Maxime Peresson\",\n author_email=\"maxime.peresson@gmail.com\",\n classifiers=[\n ],\n python_requires=\">=3.5\",\n test_suite=\"test\",\n packages=find_packages(exclude=[\"test\"]),\n entry_points={\n \"console_scripts\": [\n \"rpn=rpncalc.main:main\",\n ]\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"59328722","text":"\"\"\" rest - routes mapping based on oaspecs\n\n\nSee tests/test_rest_routing.py for example of usage\n\"\"\"\n\nimport inspect\nimport logging\nfrom collections import namedtuple\nfrom collections.abc import Callable, Iterator, Mapping\n\nfrom aiohttp import web\n\nfrom .openapi import OpenApiSpec, get_base_path\n\nlogger = logging.getLogger(__name__)\n\n\ndef 
has_handler_signature(fun) -> bool:\n # TODO: last parameter is web.Request or called request?\n return any(\n param.annotation == web.Request\n for name, param in inspect.signature(fun).parameters.items()\n )\n\n\ndef get_handlers_from_namespace(handlers_nsp) -> dict:\n \"\"\"Gets all handlers in a namespace define by a class or a module\"\"\"\n # TODO: Should search for function that are marked as \"handlers\". Similar to @pytest.fixtures??\n if inspect.ismodule(handlers_nsp):\n\n def predicate(obj):\n return inspect.isfunction(obj) and has_handler_signature(obj)\n\n elif hasattr(handlers_nsp, \"__class__\"):\n\n def predicate(obj):\n return inspect.ismethod(obj) and has_handler_signature(obj)\n\n else:\n raise ValueError(\n \"Expected module or class as namespace, got %s\" % type(handlers_nsp)\n )\n\n return dict(inspect.getmembers(handlers_nsp, predicate))\n\n\nPathOperation = namedtuple(\"PathOperation\", \"method path operation_id tags\")\n\n\ndef iter_path_operations(specs: OpenApiSpec) -> Iterator[PathOperation]:\n \"\"\"Iterates paths in api specs returning tuple (method, path, operation_id, tags)\n\n NOTE: prepend API version as basepath to path url, e.g. /v0/my/path for path=/my/path\n \"\"\"\n base_path = get_base_path(specs)\n assert base_path.startswith(\"/v\") # nosec\n\n for url, path in specs.paths.items():\n for method, operation in path.operations.items():\n yield PathOperation(\n method.upper(), base_path + url, operation.operation_id, operation.tags\n )\n\n\ndef map_handlers_with_operations(\n handlers_map: Mapping[str, Callable],\n operations_it: Iterator[PathOperation],\n *,\n strict: bool = True,\n) -> list[web.RouteDef]:\n \"\"\"Matches operation ids with handler names and returns a list of routes\n\n :param handlers_map: .See get_handlers_from_namespace\n :type handlers_map: Mapping[str, Callable]\n :param operations_it: iterates over specs operations. 
See iter_path_operations\n :type operations_it: Iterator[PathOperation]\n :param strict: it raises an error if either a handler or an operator was not mapped, defaults to True\n :param strict: bool, optional\n :raises ValueError: if not operations mapped\n :raises RuntimeError: if not handlers mapped\n :rtype: List[web.RouteDef]\n \"\"\"\n\n handlers = dict(handlers_map)\n routes = []\n for method, path, operation_id, _tags in operations_it:\n handler = handlers.pop(operation_id, None)\n if handler:\n routes.append(web.route(method.upper(), path, handler, name=operation_id))\n elif strict:\n msg = f\"Cannot find any handler named {operation_id} \"\n raise ValueError(msg)\n\n if handlers and strict:\n msg = f\"{len(handlers)} handlers were not mapped to routes: {handlers.keys()}\"\n raise RuntimeError(msg)\n\n return routes\n\n\ndef create_routes_from_namespace(\n specs: OpenApiSpec, handlers_nsp, *, strict: bool = True\n) -> list[web.RouteDef]:\n \"\"\"Gets *all* available handlers and maps one-to-one to *all* specs routes\n\n :param specs: openapi spec object\n :type specs: OpenApiSpec\n :param handlers_nsp: class or module with handler functions\n :param strict: ensures strict mapping, defaults to True\n :param strict: bool, optional\n :rtype: List[web.RouteDef]\n \"\"\"\n handlers = get_handlers_from_namespace(handlers_nsp)\n\n if not handlers and strict:\n raise ValueError(\"No handlers found in %s\" % handlers_nsp)\n\n return map_handlers_with_operations(\n handlers, iter_path_operations(specs), strict=strict\n )\n","sub_path":"packages/service-library/src/servicelib/aiohttp/rest_routing.py","file_name":"rest_routing.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"552509801","text":"#!/usr/bin/python3\n# coding=utf-8\n\n\nclass Node(object):\n def __init__(self, new_data):\n self.data = new_data\n self.next = None\n self.prev = None\n\n\nclass DoubleList(object):\n def __init__(self):\n self.__head = Node(None)\n self.__length = 0\n\n def get_length(self):\n return self.__length\n def is_empty(self):\n if self.__length == 0:\n return True\n else:\n return False\n\n def append(self, data):\n new_node = Node(data)\n cur = self.__head\n while cur.next is not None:\n cur = cur.next\n else:\n cur.next = new_node\n new_node.prev = cur\n self.__length += 1\n\n def travel(self):\n cur = self.__head\n for i in range(self.__length):\n print(cur.next.data, end=\"->\")\n cur = cur.next\n else:\n print(\"\")\n\n def insert(self, posi, new_data):\n new_node = Node(new_data)\n cur = self.__head\n for i in range(self.__length):\n if i == posi:\n new_node.next = cur.next\n new_node.prev = cur\n cur.next.prev = new_node\n cur.next = new_node\n self.__length += 1\n else:\n cur = cur.next\n else:\n print(\"index is %s, legth is %s\"%(posi, self.__length))\n\n def remove(self, new_data):\n cur = self.__head\n if self.is_empty():\n print(\"empty , can't remove any\")\n return\n for i in range(self.__length):\n if cur.next.data == new_data:\n cur.next = cur.next.next\n cur.next.next.prev = cur\n self.__length -= 1\n else:\n cur = cur.next\n else:\n print(\"the data is not in the double list\")\n\n\ndef main():\n\n dbl = DoubleList()\n for i in range(10):\n dbl.append(i)\n print(dbl.get_length())\n dbl.travel()\n dbl.insert(0, 100)\n dbl.insert(3, 800)\n dbl.insert(100, 0)\n dbl.travel()\n dbl.remove(800)\n dbl.travel()\n pass\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"j14day/3双向列表.py","file_name":"3双向列表.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"444865129","text":"#!/usr/bin/python\n#coding:utf-8\n\n# https://leetcode-cn.com/explore/featured/card/array-and-string/198/introduction-to-array/770/\n# Find the pivot index of an array\n# Given an array of integers nums, write a method that returns the \"pivot index\" of the array.\n# The pivot index is defined as an index where the sum of all the elements to its left equals the sum of all the elements to its right.\n# If the array has no pivot index, return -1. If the array has more than one pivot index, return the left-most one.\n# Example 1:\n\n# Input: \n# nums = [1, 7, 3, 6, 5, 6]\n# Output: 3\n# Explanation: \n# The sum of the numbers to the left of index 3 (nums[3] = 6) is 1 + 7 + 3 = 11, which equals the sum of the numbers to its right (5 + 6 = 11).\n# Also, 3 is the first index that satisfies the requirement.\n# Example 2:\n\n# Input: \n# nums = [1, 2, 3]\n# Output: -1\n# Explanation: \n# There is no index in the array that satisfies this condition.\n# Notes:\n\n# The length of nums is in the range [0, 10000].\n# Each nums[i] is an integer in the range [-1000, 1000].\n\nclass Solution(object):\n def pivotIndex(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # Approach: prefix sum\n # S is the sum of the array. When index i is the pivot index, the sum leftsum of the elements to the left of i satisfies leftsum == S - nums[i] - leftsum.\n # So we only need to check whether the current index i satisfies leftsum == S - nums[i] - leftsum while updating leftsum as we go.\n S = sum(nums)\n leftsum = 0\n for i, x in enumerate(nums):\n if leftsum == (S - leftsum - x):\n return i\n leftsum += x\n return -1\n\nnums = [1, 7, 3, 6, 5, 6]\ns = Solution()\nn = s.pivotIndex(nums)\nprint(n) ","sub_path":"数组和字符串/array_3.py","file_name":"array_3.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"600968641","text":"import os\nimport logging\n\n\ndef create_folder(fd):\n if not os.path.exists(fd):\n os.makedirs(fd)\n \n\ndef create_logging(log_dir, filemode):\n create_folder(log_dir)\n i1 = 0\n\n while os.path.isfile(os.path.join(log_dir, '{:04d}.log'.format(i1))):\n i1 += 1\n \n log_path = os.path.join(log_dir, '{:04d}.log'.format(i1))\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename=log_path,\n filemode=filemode)\n\n # Print to console\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n \n return logging","sub_path":"metric_sub/src_train/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"503619728","text":"from RL import RL\nfrom State import State\nfrom pathlib import Path\nimport numpy\nimport torch\nfrom torch.autograd import Variable\nimport sys\n\n\nprint(\"___________\")\nprint(\"Connect 4\")\nprint(\"___________\\n\")\n\nmode = \"CPU\"\nrunOnGPU = len(sys.argv)>1 #If a console parameter is received run in GPU. 
else run on CPU\nif(runOnGPU):\n mode = \"GPU\"\n\n#Learning parameters\nbatch_size = 64\nlearning_rate = 0.000001\ninitial_epsilon = 0.25\nepsilon_decay = 0.999997 #This decay value achieves 0.97 in episode 10,000, 0.74 in episode 100,000, 0.05 in episode 1,000,000\ndiscount = 0.95\ntrainEpisodes = 40000\nexperience_stored = 1000000\nstep_delta = 1000\n\n\n#Number of episodes to run before displaying learning stats\ndisplay_frequency = 10\n\nAI = RL(batch_size , learning_rate, initial_epsilon, epsilon_decay, discount, experience_stored, step_delta, display_frequency, runOnGPU)\n\nCPUfile = Path(\"netCPU.pt\")\nGPUfile = Path(\"netGPU.pt\")\n\n#Load experience information from previous sessions\nAI.approximator.loadExperience(\"experience.pkl\")\nif (runOnGPU and GPUfile.is_file()) or (not runOnGPU and CPUfile.is_file()):\n print(\"Loaded Network\", mode)\n print(\"Learning...\")\n if(runOnGPU):\n AI.approximator = torch.load(\"netGPU.pt\")\n AI.QLearningGPU(trainEpisodes)\n torch.save(AI.approximator, \"netGPU.pt\")\n else:\n AI.approximator = torch.load(\"netCPU.pt\")\n AI.QLearningCPU(trainEpisodes)\n torch.save(AI.approximator, \"netCPU.pt\")\n\nelse:\n print(\"Starting New Training\" , mode)\n print(\"Learning...\")\n if(runOnGPU):\n AI.QLearningGPU(trainEpisodes)\n torch.save(AI.approximator, \"netGPU.pt\")\n else:\n AI.QLearningCPU(trainEpisodes)\n torch.save(AI.approximator, \"netCPU.pt\")\n\n#Store experience information in text file for later training sessions\nAI.approximator.saveExperience(\"experience.pkl\")\n\nwhile(True):\n val = input(\"\\nEnter 1 to go first, enter otherwise to go second: \")\n\n state = State()\n stateVector = state.getTensor()\n playerTurn = False\n R = 0\n inputTensor = torch.FloatTensor(1, 2, 6, 7).zero_() #Initialize tensor for input states\n\n if(val==\"1\"):\n playerTurn = True\n\n while((R!=-1 and R!=1) and state.movesLeft>0):\n state.print(\"+\",\"-\",\"0\")\n\n #Player Moves\n if(playerTurn):\n #Check that the player introduced an avaiable move\n\n while(True):\n val = input(\"\\nYour Move (1 - 7): \")\n A = eval(val)-1\n if(state.avMoves[A]):\n break\n else:\n state.print(\"+\",\"-\",\"0\")\n print(\"\\nInvalid move, please select an available move only\")\n #AI Moves\n else:\n print(\"\\n AI Moved\")\n inputTensor[0] = torch.FloatTensor(stateVector)\n if(runOnGPU):\n A = AI.approximator.bestAction(Variable(inputTensor).cuda(), state.avMoves)\n else:\n A = AI.approximator.bestAction(Variable(inputTensor), state.avMoves)\n state.act(A)\n stateVector = state.getTensor()\n R = state.reward()\n playerTurn = not playerTurn\n\n state.print(\"+\",\"-\",\"0\")\n\n if(R!=-1 and R!=1):\n print(\"Tie\")\n elif(R and playerTurn):\n print(\"AI wins\")\n else:\n print(\"You win\")\n\n val = input(\"\\nEnter 1 to play another game: \")\n if(val!=\"1\"):\n break\n","sub_path":"Connect4-DQN/HumanGame.py","file_name":"HumanGame.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"45931012","text":"from const import BCC_KEY\nfrom const import CC_KEY\nfrom const import FROM_KEY\nfrom const import REPLY_KEY\nfrom const import SUBJECT_KEY\nfrom const import TO_KEY\nfrom const import TYPE_KEY\nfrom peewee import BooleanField\nfrom peewee import CharField\nfrom peewee import IntegerField\nfrom peewee import Model\nfrom peewee import SqliteDatabase\n\ndb = SqliteDatabase('emails.db')\n\n\nclass BaseModel(Model):\n\n class Meta(object):\n database = db\n\n\nclass 
Email(BaseModel):\n\n # fields\n id = IntegerField(primary_key=True)\n subject = CharField()\n from_email = CharField()\n to_emails = CharField()\n cc_emails = CharField(null=True)\n bcc_emails = CharField(null=True)\n email_type = CharField()\n reply = BooleanField()\n\n # database connection\n database = None\n\n def __repr__(self):\n representation = (\n \"0:\n rand1=random.randint(0,len(list1)-1)\n list2.append(list1.pop(rand1))\n list2.append(list2[0])\n return list2\n\n\ndef distancesFromCoords():\n f = open('kroA100.tsp')\n data = [line.replace(\"\\n\",\"\").split(\" \")[1:] for line in f.readlines()[6:106]]\n coords = list(map(lambda x: [float(x[0]),float(x[1])], data))\n distances = []\n for i in range(len(coords)):\n row = []\n for j in range(len(coords)):\n row.append(math.sqrt((coords[i][0]-coords[j][0])**2 + (coords[i][1]-coords[j][1])**2))\n distances.append(row)\n return distances\n\ndef calculateZ(myList,distances): \n sum=0\n for i in range(len(myList)-1): #se detiene en -2 para llegar a la penultima ciudad porque la ultima es el retorno\n fromCity=myList[i] #el numero en la posicion i. (que puede ser del 0 al 99)\n toCity=myList[i+1]\n sum=sum+distances[fromCity][toCity]\n return sum\n\ndef explo_matrizFeromonaInicial(matrizDistancias,n):\n #Creamos matriz inicialziada en 0 de ncities x ncities\n matrizFeromonas=np.zeros(np.shape(matrizDistancias))\n #Generamos cualquier solucion inicial\n nSolution=generateInitialSolution(len(matrizDistancias))\n #el ciclo se repetira n veces primero (1000 estaria bien)\n while(n>0):\n #en cada iteracion se probara con una solucion aleatoria y apartir de su Z\n #se llenara la matriz de feromonas\n #no es perturbar es general una matriz totalmente Distinta\n nSolution=generateInitialSolution(len(matrizDistancias))\n zOfCurrentSolution=calculateZ(nSolution,matrizDistancias)\n inverseZ=1/zOfCurrentSolution\n #-2 porque la ultima ciudad(len -1 ) no ira a ninguna, ya sera la primera desde donde se partio\n for i in range(len(nSolution)-1):\n fromCity=nSolution[i]\n toCity=nSolution[i+1]\n matrizFeromonas[fromCity][toCity]+=inverseZ\n \n #al final de la iteracion n.i generamos otra solucion para que la matriz de feromonas\n #se actualice con respecto a otra nueva solucion \n n-=1\n return matrizFeromonas\n\ndef matrizProbabilidades(heuristica,feromona,aplha,beta,actual):\n feromona=np.array(feromona)\n heuristica=np.array(heuristica)\n #esto representa el numerador\n matriz=(feromona**aplha)*(heuristica**beta)\n #ahora hacemos el denominador que es la suma de cada columna\n for i in actual:\n matriz[i,:]=0\n\n #denominador\n #sumatoria=np.sum(matriz,axis=0)\n sumatoria=matriz.sum(axis=0)\n\n #la matriz de probabilidades\n probabilidades=matriz/sumatoria\n return probabilidades\n\n#cuando pase por el camino se llenara la feromona y se borrara, eso va despues de usar este metodo\ndef generatePath(heuristica,feromona,aplha,beta):\n contador=0\n actual=[0]\n while(len(actual)ran):\n return j\n return 0\n\n\n\n\n\n\n\n\n#####MAIN##################\nif __name__ == \"__main__\":\n antColonyOptimization(distancesFromCoords(),1,5,0.1,100)\n\n","sub_path":"aco.py","file_name":"aco.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"584124531","text":"\"\"\"\nComponent for studying lipid membranes at an interface\n\"\"\"\n\nimport numpy as np\nfrom refnx.reflect import Component, SLD, ReflectModel, Structure\nfrom refnx.analysis import 
possibly_create_parameter, Parameters, Parameter\n\n\nclass LipidLeaflet(Component):\n \"\"\"\n Describes a lipid leaflet Component at an interface\n\n Parameters\n ----------\n APM: float or Parameter\n b_heads: float, Parameter or complex\n Sum of coherent scattering lengths of head group (Angstrom)\n vm_heads: float or Parameter\n Molecular volume of head group (Angstrom**2)\n thickness_heads: float or Parameter\n Thickness of head group region (Angstrom)\n b_tails: float, Parameter or complex\n Sum of coherent scattering lengths of tail group (Angstrom)\n vm_tails: float or Parameter\n Molecular volume of tail group (Angstrom**2)\n thickness_tails: float or Parameter\n Thickness of head group region (Angstrom)\n rough_head_tail: float or Parameter\n Roughness of head-tail group (Angstrom)\n rough_preceding_mono: float or Parameter\n Roughness between preceding component (in the fronting direction) and\n the monolayer (Angstrom). If `reverse_monolayer is False` then this is\n the roughness between the preceding component and the heads, if\n `reverse_monolayer is True` then this is the roughness between the\n preceding component and the tails.\n reverse_monolayer: bool, optional\n The default is to have heads closer to the fronting medium and\n tails closer to the backing medium. If `reverse_monolayer is True`\n then the tails will be closer to the fronting medium and heads\n closer to the backing medium.\n name: str, optional\n The name for the component\n\n Notes\n -----\n The sum of coherent scattering lengths must be in Angstroms, the volume\n must be in cubic Angstroms. This is because the SLD of a tail group is\n calculated as `b_tails / vm_tails * 1e6` to achieve the units\n 10**6 Angstrom**-2.\n \"\"\"\n\n # TODO: use SLD of head instead of b_heads, vm_heads?\n def __init__(self, apm, b_heads, vm_heads, thickness_heads,\n b_tails, vm_tails, thickness_tails, rough_head_tail,\n rough_preceding_mono, reverse_monolayer=False, name=''):\n \"\"\"\n Parameters\n ----------\n apm: float or Parameter\n Area per molecule\n b_heads: float, Parameter or complex\n Sum of coherent scattering lengths of head group (Angstrom)\n vm_heads: float or Parameter\n Molecular volume of head group (Angstrom**3)\n thickness_heads: float or Parameter\n Thickness of head group region (Angstrom)\n b_tails: float, Parameter or complex\n Sum of coherent scattering lengths of tail group (Angstrom)\n vm_tails: float or Parameter\n Molecular volume of tail group (Angstrom**3)\n thickness_tails: float or Parameter\n Thickness of head group region (Angstrom)\n reverse_monolayer: bool, optional\n The default is to have heads closer to the fronting medium and\n tails closer to the backing medium. 
If `reverse_monolayer is True`\n then the tails will be closer to the fronting medium and heads\n closer to the backing medium.\n name: str, optional\n The name for the component\n \"\"\"\n super(LipidLeaflet, self).__init__()\n self.apm = possibly_create_parameter(apm,\n '%s - area_per_molecule' % name)\n\n if isinstance(b_heads, complex):\n self.b_heads_real = possibly_create_parameter(\n b_heads.real,\n name='%s - b_heads_real' % name)\n self.b_heads_imag = possibly_create_parameter(\n b_heads.imag,\n name='%s - b_heads_imag' % name)\n else:\n self.b_heads_real = possibly_create_parameter(\n b_heads,\n name='%s - b_heads_real' % name)\n self.b_heads_imag = possibly_create_parameter(\n 0,\n name='%s - b_heads_imag' % name)\n\n self.vm_heads = possibly_create_parameter(\n vm_heads,\n name='%s - vm_heads' % name)\n\n self.thickness_heads = possibly_create_parameter(\n thickness_heads,\n name='%s - thickness_heads' % name)\n\n if isinstance(b_tails, complex):\n self.b_tails_real = possibly_create_parameter(\n b_tails.real,\n name='%s - b_tails_real' % name)\n self.b_tails_imag = possibly_create_parameter(\n b_tails.imag,\n name='%s - b_tails_imag' % name)\n else:\n self.b_tails_real = possibly_create_parameter(\n b_tails,\n name='%s - b_tails_real' % name)\n self.b_tails_imag = possibly_create_parameter(\n 0,\n name='%s - b_tails_imag' % name)\n\n self.vm_tails = possibly_create_parameter(\n vm_tails,\n name='%s - vm_tails' % name)\n self.thickness_tails = possibly_create_parameter(\n thickness_tails,\n name='%s - thickness_tails' % name)\n self.rough_head_tail = possibly_create_parameter(\n rough_head_tail,\n name='%s - rough_head_tail' % name)\n self.rough_preceding_mono = possibly_create_parameter(\n rough_preceding_mono,\n name='%s - rough_fronting_mono' % name)\n self.reverse_monolayer = reverse_monolayer\n self.name = name\n\n @property\n def slabs(self):\n \"\"\"\n Returns\n -------\n slab_model: np.ndarray\n Slab representation of monolayer\n \"\"\"\n layers = np.zeros((2, 5))\n\n # thicknesses\n layers[0, 0] = float(self.thickness_heads)\n layers[1, 0] = float(self.thickness_tails)\n\n # real and imag SLD's\n layers[0, 1] = float(self.b_heads_real) / float(self.vm_heads) * 1.e6\n layers[0, 2] = float(self.b_heads_imag) / float(self.vm_heads) * 1.e6\n\n layers[1, 1] = float(self.b_tails_real) / float(self.vm_tails) * 1.e6\n layers[1, 2] = float(self.b_tails_imag) / float(self.vm_tails) * 1.e6\n\n # roughnesses\n layers[0, 3] = float(self.rough_preceding_mono)\n layers[1, 3] = float(self.rough_head_tail)\n\n # volume fractions\n # head region\n volfrac = self.vm_heads.value / (self.apm.value *\n self.thickness_heads.value)\n layers[0, 4] = 1 - volfrac\n\n # tail region\n volfrac = self.vm_tails.value / (self.apm.value *\n self.thickness_tails.value)\n layers[1, 4] = 1 - volfrac\n\n if self.reverse_monolayer:\n layers = np.flipud(layers)\n layers[:, 3] = layers[::-1, 3]\n\n return layers\n\n @property\n def parameters(self):\n p = Parameters(name=self.name)\n p.extend([self.apm,\n self.b_heads_real, self.b_heads_imag, self.vm_heads,\n self.thickness_heads,\n self.b_tails_real, self.b_tails_imag, self.vm_tails,\n self.thickness_tails, self.rough_head_tail,\n self.rough_preceding_mono])\n return p\n\n def lnprob(self):\n # penalise unphysical volume fractions.\n volfrac_h = self.vm_heads.value / (self.apm.value *\n self.thickness_heads.value)\n\n # tail region\n volfrac_t = self.vm_tails.value / (self.apm.value *\n self.thickness_tails.value)\n\n if volfrac_h > 1 or volfrac_t > 1:\n 
return -np.inf\n\n return 0\n","sub_path":"refnx/reflect/_lipid.py","file_name":"_lipid.py","file_ext":"py","file_size_in_byte":7859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"386954855","text":"import string\nimport os\nimport collections\nfrom utils.os_utils import grab_files, mkdir\nimport utils.db_utils as db_utils\n\nimport pandas as pd\nimport pyparsing as pypar\n\nfrom utils.parallel_utils import jobmap\n\nglobal_limit = 0\nglobal_offset = 0\n\n\nclass parenthesis_nester(object):\n\n def __init__(self):\n nest = pypar.nestedExpr\n g = pypar.Forward()\n nestedParens = nest('(', ')')\n nestedBrackets = nest('[', ']')\n nestedCurlies = nest('{', '}')\n nest_grammar = nestedParens | nestedBrackets | nestedCurlies\n\n parens = \"(){}[]\"\n letters = ''.join([x for x in pypar.printables\n if x not in parens])\n word = pypar.Word(letters)\n\n g = pypar.OneOrMore(word | nest_grammar)\n self.grammar = g\n\n def __call__(self, line):\n try:\n tokens = self.grammar.parseString(line)\n except:\n return []\n return tokens\n\n\ndef is_valid_abbr(item):\n if isinstance(item, unicode):\n return False\n if len(item) != 1:\n return False\n\n word = item[0]\n\n # Break if we are doubly nested\n if not isinstance(word, unicode):\n return False\n\n # Check if there are any capital letters\n if word.lower() == word:\n return False\n\n return word\n\n\ndef check_matching(word, k, tokens):\n # Identify the capital letters\n caps = [let for let in word if\n let in string.ascii_uppercase.upper()]\n\n # Don't try to match with only a single letter (to noisy!)\n if len(caps) < 2:\n return False\n\n # This may fail if used too early in doc or if nested parens\n # this shouldn't be a match so it's OK!\n\n try:\n subtokens = tokens[k - len(caps):k]\n subtoken_let = [let.upper()[0] for let in subtokens]\n except:\n return False\n\n if subtoken_let != caps:\n return False\n\n return tuple(subtokens)\n\n\ndef evaluate_document(row, col):\n doc = row[col]\n\n doc = unicode(doc)\n doc = doc.replace('-', ' ')\n doc = doc.replace(\"'\", '')\n doc = doc.replace('\"', '')\n\n P = parenthesis_nester()\n tokens = P(doc)\n\n results = collections.Counter()\n\n for k, item in enumerate(tokens):\n word = is_valid_abbr(item)\n if word:\n subtokens = check_matching(word, k, tokens)\n if subtokens:\n results[(tuple(subtokens), word)] += 1\n\n # if results:\n # print \"Found {} abbrs in doc idx {}\".format(len(results),idx)\n\n return results\n\n\ndef dedupe_abbr(ABR):\n\n df = pd.DataFrame()\n df['phrase'] = [' '.join(x[0]) for x in ABR.keys()]\n df['abbr'] = [x[1] for x in ABR.keys()]\n df['count'] = ABR.values()\n\n # Match phrases on lowercase and remove trailing 's'\n df['reduced_phrase'] = df.phrase.str.strip()\n df['reduced_phrase'] = df.reduced_phrase.str.lower()\n df['reduced_phrase'] = df.reduced_phrase.str.rstrip('s')\n\n data = []\n for phrase, dfx in df.groupby('reduced_phrase'):\n top = dfx.sort_values(\"count\", ascending=False).iloc[0]\n\n item = {}\n item[\"count\"] = dfx[\"count\"].sum()\n item[\"phrase\"] = top[\"phrase\"]\n item[\"abbr\"] = top[\"abbr\"]\n data.append(item)\n\n df = pd.DataFrame(data).set_index(\"phrase\")\n return df.sort_values(\"count\", ascending=False)\n\n\ndef phrases_from_config(config):\n\n _PARALLEL = config.as_bool(\"_PARALLEL\")\n output_dir = config[\"phrase_identification\"][\"output_data_directory\"]\n\n target_column = config[\"target_column\"]\n\n import_config = config[\"import_data\"]\n input_data_dir = 
import_config[\"output_data_directory\"]\n\n F_CSV = grab_files(\"*.csv\", input_data_dir)\n\n ABR = collections.Counter()\n\n dfunc = db_utils.CSV_database_iterator\n INPUT_ITR = dfunc(F_CSV, target_column, progress_bar=True)\n ITR = jobmap(evaluate_document, INPUT_ITR, _PARALLEL, col=target_column)\n\n for result in ITR:\n ABR.update(result)\n\n msg = \"\\n{} total abbrs found.\"\n print(msg.format(len(ABR)))\n\n # Merge abbreviations that are similar\n print(\"Deduping abbr list.\")\n df = dedupe_abbr(ABR)\n print(\"{} abbrs remain after deduping\".format(len(df)))\n\n # Output top phrase\n print(\"Top 5 abbreviations\")\n print(df[:5])\n\n mkdir(output_dir)\n f_csv = os.path.join(output_dir,\n config[\"phrase_identification\"][\"f_abbreviations\"])\n df.to_csv(f_csv)\n\n\nif __name__ == \"__main__\":\n\n import simple_config\n config = simple_config.load()\n phrases_from_config(config)\n","sub_path":"word2vec_pipeline/phrases_from_abbrs.py","file_name":"phrases_from_abbrs.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"576849434","text":"import random\nimport itertools\nimport numpy as np\nfrom gym import spaces\n\nfrom traffic.traffic_env import TrafficEnv\nfrom traffic.road import Road, RoadSegment\nfrom traffic.car import Car\nfrom traffic.drivers.driver import Driver, XYSeperateDriver\nfrom traffic.drivers.oned_drivers import IDMDriver, PDDriver, PDriver\nfrom traffic.actions.trajectory_accel_action import TrajectoryAccelAction\nfrom traffic.constants import *\n\nclass EnvDriver(XYSeperateDriver):\n def __init__(self, \n aggressive,\n x_sigma, y_sigma,\n car,\n **kwargs):\n self.aggressive = aggressive\n self.target_lane = None\n self.car = car\n if self.aggressive:\n v_des = np.random.uniform(0.5, 1.0)\n s_des = np.random.uniform(0.8, 1.0)\n s_min = np.random.uniform(0.4, 0.6)\n min_overlap = -self.car.width/2.\n self.min_front_x = self.car.length + s_min\n self.min_back_x = self.car.length + s_min\n self.min_advantage = self.car.length/2.\n else:\n v_des = np.random.uniform(0.0, 0.5)\n s_des = np.random.uniform(0.9, 1.1)\n s_min = np.random.uniform(0.5, 0.7)\n min_overlap = self.car.width/2.\n self.min_front_x = self.car.length + s_min\n self.min_back_x = self.car.length + s_min\n self.min_advantage = self.car.length\n x_driver = IDMDriver(sigma=x_sigma, v_des=v_des, s_des=s_des, s_min=s_min, axis=0, min_overlap=min_overlap, car=car, **kwargs)\n y_driver = PDDriver(sigma=y_sigma, p_des=0., a_max=1.0, axis=1, k_p=2.0, k_d=5.0, car=car, **kwargs)\n self.on_target = True\n super(EnvDriver, self).__init__(x_driver,y_driver,car=car,**kwargs)\n\n def observe(self, cars, road):\n x, y = self.car.position\n min_front_distance0 = np.inf\n min_back_distance0 = np.inf\n min_front_distance1 = np.inf\n min_back_distance1 = np.inf\n for car in cars:\n if car is self.car:\n continue\n if car.position[1] <= 4.0:\n if (car.position[0] > x) and (car.position[0]-x < min_front_distance0):\n min_front_distance0 = car.position[0] - x\n elif (car.position[0] < x) and (x-car.position[0] < min_back_distance0):\n min_back_distance0 = x - car.position[0]\n elif car.position[1] > 4.0:\n if (car.position[0] > x) and (car.position[0]-x < min_front_distance1):\n min_front_distance1 = car.position[0] - x\n elif (car.position[0] < x) and (x-car.position[0] < min_back_distance1):\n min_back_distance1 = x - car.position[0]\n\n if y <= 4.0:\n if (min_front_distance1 - min_front_distance0 > self.min_advantage) \\\n 
and (min_front_distance1 > self.min_front_x) \\\n and (min_back_distance1 > self.min_back_x):\n self.y_driver.p_des = 6.0\n else:\n self.y_driver.p_des = 2.0\n else:\n if (min_front_distance0 - min_front_distance1 > self.min_advantage) \\\n and (min_front_distance0 > self.min_front_x) \\\n and (min_back_distance0 > self.min_back_x):\n self.y_driver.p_des = 2.0\n else:\n self.y_driver.p_des = 6.0\n\n self.x_driver.observe(cars, road)\n self.y_driver.observe(cars, road)\n\n def setup_render(self, viewer):\n if not self.aggressive:\n self.car._color = [*GREEN_COLORS[0],0.5]\n else:\n self.car._color = [*RED_COLORS[0],0.5]\n self.car._arr_color = [0.8, 0.8, 0.8, 0.5]\n\n def update_render(self, camera_center):\n if not self.aggressive:\n self.car._color = [*GREEN_COLORS[0],0.5]\n else:\n self.car._color = [*RED_COLORS[0],0.5]\n self.car._arr_color = [0.8, 0.8, 0.8, 0.5]\n\nclass EgoDriver(XYSeperateDriver):\n def __init__(self, \n x_sigma, y_sigma,\n **kwargs):\n\n x_driver = IDMDriver(sigma=x_sigma, v_des=0.0, s_des=0.7, s_min=0.5, axis=0, min_overlap=0., **kwargs)\n y_driver = PDDriver(sigma=y_sigma, p_des=0.0, a_max=1.0, axis=1, **kwargs)\n super(EgoDriver, self).__init__(x_driver,y_driver,**kwargs)\n\n def apply_action(self, action):\n self.x_driver.v_des = action[0]\n if action[1] == 0:\n self.y_driver.p_des = 2.0\n else:\n self.y_driver.p_des = 6.0\n\nclass HighWay(TrafficEnv):\n def __init__(self,\n obs_noise=0.,\n x_actions=[0.,0.5,3.],\n y_actions=[0,1],\n driver_sigma = 0.,\n control_cost=0.01,\n collision_cost=2.,\n survive_reward=0.01,\n goal_reward=2.,\n road=Road([RoadSegment([(-100.,0.),(100.,0.),(100.,8.),(-100.,8.)])]),\n left_bound = -30.,\n right_bound = 30.,\n gap_min = 8.,\n gap_max = 12.,\n max_veh_num = 12,\n num_updates=1,\n dt=0.1,\n **kwargs):\n\n self.obs_noise = obs_noise\n self.x_actions = x_actions\n self.y_actions = y_actions\n # we use target value instead of target change so system is Markovian\n self.rl_actions = list(itertools.product(x_actions,y_actions))\n self.num_updates = num_updates\n\n self.control_cost = control_cost\n self.collision_cost = collision_cost\n self.survive_reward = survive_reward\n self.goal_reward = goal_reward\n\n self.left_bound = left_bound\n self.right_bound = right_bound\n self.gap_min = gap_min\n self.gap_max = gap_max\n self.max_veh_num = max_veh_num\n self.label_dim = 2\n self.label_num = self.max_veh_num\n\n self._collision = False\n self._goal = False\n self._intentions = []\n self._lower_lane_next_idx = 1\n self._upper_lane_next_idx = int(self.max_veh_num/2.)+1\n\n self.car_length = 5.0\n self.car_width = 2.0\n self.car_max_accel = 5.0\n self.car_max_speed = 5.0\n self.car_max_rotation = 0. 
#np.pi/18.\n self.car_expose_level = 4\n self.driver_sigma = driver_sigma\n\n super(HighWay, self).__init__(\n road=road,\n cars=[],\n drivers=[],\n dt=dt,\n **kwargs,)\n\n def get_sup_labels(self):\n for driver in self._drivers:\n driver.observe(self._cars, self._road)\n labels = np.array([np.nan]*self.label_num)\n for driver in self._drivers[1:]:\n i = driver._idx - 1\n labels[i] = int(driver.aggressive)\n return labels\n\n def update(self, action):\n # recorder intentios at the begining\n self._sup_labels = self.get_sup_labels()\n\n rl_action = self.rl_actions[action]\n self._drivers[0].apply_action(rl_action)\n\n self._goal = False\n self._collision = False\n for _ in range(self.num_updates):\n for driver in self._drivers:\n driver.observe(self._cars, self._road)\n self._actions = [driver.get_action() for driver in self._drivers]\n [action.update(car, self.dt) for (car, action) in zip(self._cars, self._actions)]\n\n ego_car = self._cars[0]\n for car in self._cars[1:]:\n if ego_car.check_collision(car):\n self._collision = True\n return\n\n if ego_car.position[0] > self.right_bound-2.:\n self._goal = True\n return\n\n # add cars when there is enough space\n min_upper_x = np.inf\n min_lower_x = np.inf\n for car in self._cars:\n if (car.position[1] <= 4.) and (car.position[0] < min_lower_x):\n min_lower_x = car.position[0]\n if (car.position[1] > 4.) and (car.position[0] < min_upper_x):\n min_upper_x = car.position[0]\n if min_lower_x > (self.left_bound + np.random.uniform(self.gap_min,self.gap_max) + self.car_length):\n x, y = self.left_bound, 2.\n aggressive = np.random.choice([True,False])\n car, driver = self.add_car(x, y, 0., 0., aggressive, 0.)\n if hasattr(self, 'viewer') and self.viewer:\n car.setup_render(self.viewer)\n driver.setup_render(self.viewer)\n if min_upper_x > (self.left_bound + np.random.uniform(self.gap_min,self.gap_max) + self.car_length):\n x, y = self.left_bound, 6.\n aggressive = np.random.choice([True,False])\n car, driver = self.add_car(x, y, 0., 0., aggressive, 0.)\n if hasattr(self, 'viewer') and self.viewer:\n car.setup_render(self.viewer)\n driver.setup_render(self.viewer)\n\n # remove cars that are out-of bound\n for car, driver in zip(self._cars[1:],self._drivers[1:]):\n if car.position[0] > self.right_bound:\n self.remove_car(car, driver)\n\n def is_terminal(self):\n return (self._collision or self._goal)\n\n def get_info(self):\n info = {}\n info['sup_labels'] = np.copy(self._sup_labels)\n\n if self._collision:\n info['event']='collision'\n elif self._goal:\n info['event']='goal'\n else:\n info['event']='nothing'\n\n return info\n\n def observe(self):\n # TODO: normalization\n obs = np.zeros(int(4*self.max_veh_num+4))\n for car in self._cars:\n i = int(car._idx*4)\n obs[i:i+2] = car.position + np.random.uniform(-1.,1.,2)*self.obs_noise\n obs[i+2:i+4] = car.velocity + np.random.uniform(-1.,1.,2)*self.obs_noise\n\n obs = np.copy(obs)\n return obs\n\n @property\n def observation_space(self):\n low = -np.ones(int(4*self.max_veh_num+4))\n high = np.ones(int(4*self.max_veh_num+4))\n return spaces.Box(low=low, high=high, dtype=np.float32)\n\n @property\n def action_space(self):\n return spaces.Discrete(len(self.rl_actions))\n\n def get_reward(self):\n reward = 0.\n action = self._actions[0]\n ego_car = self._cars[0]\n v_x, v_y = ego_car.velocity[0], ego_car.velocity[1]\n\n control_cost = 0. 
# TODO\n reward += self.control_cost*control_cost\n\n if self._collision:\n reward -= self.collision_cost\n elif self._goal:\n reward += self.goal_reward\n else:\n reward += self.survive_reward\n # print(speed_cost, t_cost, control_cost, reward)\n return reward\n\n def remove_car(self, car, driver):\n self._cars.remove(car)\n self._drivers.remove(driver)\n if hasattr(self, 'viewer') and self.viewer:\n car.remove_render(self.viewer)\n driver.remove_render(self.viewer)\n\n def add_car(self, x, y, vx, vy, aggressive, theta):\n if y <= 4.:\n idx = self._lower_lane_next_idx\n self._lower_lane_next_idx += 1\n if self._lower_lane_next_idx > int(self.max_veh_num/2.):\n self._lower_lane_next_idx = 1\n elif y > 4.:\n idx = self._upper_lane_next_idx\n self._upper_lane_next_idx += 1\n if self._upper_lane_next_idx > self.max_veh_num:\n self._upper_lane_next_idx = int(self.max_veh_num/2.)+1\n car = Car(idx=idx, length=self.car_length, width=self.car_width, color=random.choice(RED_COLORS),\n max_accel=self.car_max_accel, max_speed=self.car_max_speed,\n max_rotation=self.car_max_rotation,\n expose_level=self.car_expose_level)\n driver = EnvDriver(aggressive=aggressive, \n x_sigma=self.driver_sigma, y_sigma=0.,\n idx=idx, car=car, dt=self.dt\n ) \n car.set_position(np.array([x, y]))\n car.set_velocity(np.array([vx, vy]))\n car.set_rotation(theta)\n\n self._cars.append(car)\n self._drivers.append(driver)\n return car, driver\n\n def _reset(self):\n self._collision = False\n self._goal = False\n self._intentions = []\n self._lower_lane_next_idx = 1\n self._upper_lane_next_idx = int(self.max_veh_num/2.)+1\n\n self._cars, self._drivers = [], []\n x_0 = self.left_bound\n y_0 = np.random.choice([2.,6.])\n car = Car(idx=0, length=self.car_length, width=self.car_width, color=random.choice(BLUE_COLORS),\n max_accel=self.car_max_accel, max_speed=self.car_max_speed,\n max_rotation=self.car_max_rotation,\n expose_level=self.car_expose_level)\n driver = EgoDriver(x_sigma=self.driver_sigma, y_sigma=0.,\n idx=0,car=car,dt=self.dt)\n car.set_position(np.array([x_0, y_0]))\n car.set_velocity(np.array([0., 0.]))\n car.set_rotation(0.)\n self._cars.append(car)\n self._drivers.append(driver)\n # randomly generate surrounding cars and drivers\n # lower lane \n x = self.right_bound - np.random.rand()*(self.gap_max-self.gap_min)\n if y_0 == 2.0:\n x_min = x_0 + self.car_length + self.gap_min\n else:\n x_min = self.left_bound\n y = 2.0\n while (x >= x_min):\n aggressive = np.random.choice([True,False])\n self.add_car(x, y, 0., 0., aggressive, 0.)\n x -= (np.random.uniform(self.gap_min,self.gap_max) + self.car_length)\n\n # upper lane\n x = self.right_bound - np.random.rand()*(self.gap_max-self.gap_min)\n if y_0 == 6.0:\n x_min = x_0 + self.car_length + self.gap_min\n else:\n x_min = self.left_bound\n y = 6.0\n while (x >= x_min):\n aggressive = np.random.choice([True,False])\n self.add_car(x, y, 0., 0., aggressive, 0.)\n x -= (np.random.uniform(self.gap_min,self.gap_max) + self.car_length)\n\n self._sup_labels = self.get_sup_labels()\n return None\n\n def setup_viewer(self):\n from traffic import rendering\n self.viewer = rendering.Viewer(1200, 800)\n self.viewer.set_bounds(-40.0, 40.0, -20.0, 20.0)\n\n def get_camera_center(self):\n return np.array([0.,4.0])\n\n def update_extra_render(self, extra_input):\n start = np.array([-100.,4.0]) - self.get_camera_center()\n end = np.array([100.,4.0]) - self.get_camera_center()\n attrs = {\"color\":(1.,1.,1.),\"linewidth\":4.}\n self.viewer.draw_line(start, end, **attrs)\n\n if 
extra_input:\n if ('attention_weight' in extra_input.keys()) and (extra_input['attention_weight'] is not None):\n edge_index = extra_input['attention_weight'][0]\n attention_weight = extra_input['attention_weight'][1]\n upper_indices, lower_indices = self.get_sorted_indices()\n car_indices = [np.nan]*(1+self.max_veh_num)\n car_indices[0] = 0\n car_indices[1:len(lower_indices)+1] = lower_indices[:]\n car_indices[int(self.max_veh_num/2)+1:int(self.max_veh_num/2)+1+len(upper_indices)] = upper_indices[:]\n starts, ends, attentions = [], [], []\n for i in range(edge_index.shape[1]):\n if np.isnan(car_indices[edge_index[0,i]]) or np.isnan(car_indices[edge_index[1,i]]):\n pass\n elif car_indices[edge_index[1,i]] == 0:\n attention = attention_weight[i].item()\n attentions.append(attention)\n car_i = car_indices[edge_index[0,i]]\n car_j = car_indices[edge_index[1,i]]\n start = self._cars[car_i].position - self.get_camera_center()\n end = self._cars[car_j].position - self.get_camera_center()\n starts.append(start)\n ends.append(end)\n rank_index = np.argsort(attentions)\n starts = np.array(starts)[rank_index]\n ends = np.array(ends)[rank_index]\n attentions = np.array(attentions)[rank_index]\n assert np.isclose(np.sum(attentions),1.)\n for start, end, attention in zip(starts[-3:],ends[-3:],attentions[-3:]):\n attrs = {\"color\":(1.,0.,1.),\"linewidth\":10.*attention}\n if (start == end).all():\n from traffic.rendering import make_circle, _add_attrs\n circle = make_circle(radius=1., res=15, filled=False, center=start)\n _add_attrs(circle, attrs)\n self.viewer.add_onetime(circle)\n else:\n self.viewer.draw_line(start, end, **attrs)\n if ('intentions' in extra_input.keys()) and (extra_input['intentions'] is not None):\n for car in self._cars[1:]:\n from traffic.rendering import make_circle, _add_attrs\n intention = extra_input['intentions'][car._idx-1]\n start = car.position - self.get_camera_center()\n attrs = {\"color\":(intention[0],intention[1],0.)}\n circle = make_circle(radius=0.5, res=15, filled=True, center=start)\n _add_attrs(circle, attrs)\n self.viewer.add_onetime(circle) \n\nif __name__ == '__main__':\n import time\n import pdb\n env = HighWay(num_updates=1, driver_sigma=0.1, \n obs_noise=0.1,\n )\n obs = env.reset()\n img = env.render()\n done = False\n maximum_step = 200\n t = 0\n cr = 0.\n actions = [4]*(2*maximum_step)\n # actions = np.load('/Users/xiaobaima/Dropbox/SISL/rlkit/tests/Traffic/Data/t_intersection/MyDQNcg0.1expl0.2/seed0/failure1.npy')\n while True: #not done: \n # pdb.set_trace()\n # action = actions[t][0]\n action = actions[t]\n # action = np.random.randint(env.action_space.n)\n # action = input(\"Action\\n\")\n # action = int(action)\n # while action < 0:\n # t = 0\n # cr = 0.\n # env.reset()\n # env.render()\n # action = input(\"Action\\n\")\n # action = int(action)\n t += 1\n obs, reward, done, info = env.step(action)\n print('t: ', t)\n print('action: ',action)\n print('obs: ', obs)\n print('reward: ', reward)\n print('info: ', info)\n cr += reward\n env.render()\n time.sleep(0.1)\n if (t > maximum_step) or done:\n print('cr: ',cr)\n pdb.set_trace()\n # if env._collision or env._outroad:\n # pdb.set_trace()\n t = 0\n cr = 0.\n env.reset()\n env.render()\n env.close()\n","sub_path":"tests/Traffic/traffic/scenarios/highway_2.py","file_name":"highway_2.py","file_ext":"py","file_size_in_byte":19228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"365909675","text":"import bz2\nimport logging\nimport 
multiprocessing\nimport os\nimport subprocess\n\nimport gensim\nimport pkg_resources\nfrom gensim.corpora import WikiCorpus\nfrom gensim.models.word2vec import Word2Vec\n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n working_dir = os.getcwd()\n prefix = 'wiki_nl_'\n threads = multiprocessing.cpu_count() * 2\n result_folder = pkg_resources.resource_filename('resources', 'results')\n dump_name = 'nlwiki-20170920-pages-articles.xml.bz2'\n wiki_dump_url = 'https://dumps.wikimedia.org/nlwiki/20170920/nlwiki-20170920-pages-articles.xml.bz2'\n saved_model_name = pkg_resources.resource_filename('resources', 'word2vec.model')\n\n if not pkg_resources.resource_exists('resources', dump_name):\n print('Please provide a wiki dump. For example {}'.format(wiki_dump_url))\n raise FileNotFoundError('No wiki dump found')\n wiki_dump = pkg_resources.resource_filename('resources', dump_name)\n\n corpus_generated = False\n\n for dirpath, dirnames, files in os.walk(result_folder):\n if files:\n corpus_generated = True\n else:\n os.makedirs(result_folder)\n\n if not corpus_generated:\n subprocess.run(['python', '-m', 'gensim.scripts.make_wiki', wiki_dump, result_folder])\n\n bz2_file = bz2.BZ2File('{}/{}wordids.txt.bz2'.format(result_folder, prefix))\n id2word = gensim.corpora.Dictionary.load_from_text(bz2_file)\n sentences = WikiCorpus(wiki_dump, dictionary=id2word).get_texts()\n\n model = Word2Vec(size=200, window=5, min_count=10, workers=threads)\n model.build_vocab(sentences)\n model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)\n model.save(saved_model_name)\n","sub_path":"smug/utils/word_vectoring_model_generator.py","file_name":"word_vectoring_model_generator.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"73858745","text":"#coding:utf-8\n\nimport json\nimport urllib.request\nimport RSS2Gen\nimport datetime\nimport os\nimport re\n\ndef getImg(url):\n r = urllib.request.urlopen(url)\n html = r.read().decode('UTF-8',)\n return re.search(r'\"\"\\W
',html).group(1)\n \ndef genRss():\n enterurl = 'http://news-at.zhihu.com/api/4/news/latest'\n r = urllib.request.urlopen(enterurl)\n html = r.read().decode('UTF-8',)\n \n data = json.loads(html)\n \n itemslist = []\n \n for story in data['stories']:\n storylink = 'http://daily.zhihu.com/story/' + str(story['id'])\n
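 # getImg fetches the story page and pulls the header image URL out of its HTML.\n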
 imgurl = getImg(storylink)\n img = '''<img src=\"''' + imgurl + '''\">'''\n \n contenturl = 'http://news-at.zhihu.com/api/4/news/' + str(story['id'])\n r = urllib.request.urlopen(contenturl)\n html = r.read().decode('UTF-8',)\n data = json.loads(html)\n \n itemslist.append(RSS2Gen.RSSItem(\n title = data['title'],\n link = 'http://daily.zhihu.com/story/' + str(story['id']),\n description = data['body'].replace('''<div class=\"img-place-holder\">
''',img)\n )\n )\n \n rss = RSS2Gen.RSS2(\n title = \"知乎日报\",\n link = \"http://i.zxc.science/zhihudaily.xml\", \n description = \"一个python构建的知乎日报rss源\",\n lastBuildDate = datetime.datetime.now(),\n \n items = itemslist\n )\n \n rss.write_xml(open(\"zhihudaily.xml\", \"w\", encoding='utf-8'))\n\ngenRss()\n","sub_path":"zhihudaily.py","file_name":"zhihudaily.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"453606810","text":"\"\"\"Module containing fields and methods of the model\"\"\"\n\nfrom django.db import models\nfrom .department import Department\n\n\nclass Employee(models.Model):\n \"\"\"Class containing fields and methods of the model\"\"\"\n\n name_employee = models.CharField('Employee:', unique=True, max_length=30)\n dep = models.ForeignKey(Department, null=True, on_delete=models.SET_NULL)\n salary = models.PositiveIntegerField('Salary:', default=0)\n position = models.CharField('Position:', max_length=30)\n date = models.DateField('Date:')\n\n @staticmethod\n def get_absolute_url():\n \"\"\"A function that returns an absolute URL\"\"\"\n\n return '/employee/'\n\n def __str__(self):\n \"\"\"A function that returns a reference to the name field\"\"\"\n\n return self.name_employee\n","sub_path":"web-app/management/department/models/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"593436027","text":"import time\n\nimport sys\nsys.setrecursionlimit(30000)\n\nn = 1000\ntree = list(range(-1, n))\ntree = list(map(str, tree))\nstart = time.time()\n\n\ndef get_height(sons, node):\n height = 1\n for son in sons[node]:\n height = max(height, get_height(sons, son) + 1)\n return height\n\ndef run(n, parents):\n sons = [[] for i in range(int(n))]\n head = 0\n for i, parent in enumerate(parents):\n if parent == '-1':\n head = i\n else:\n sons[int(parent)].append(i)\n return get_height(sons, head)\n\n# n = input()\n# parents = input().strip('\\n').split(' ')\n# print(run(n, parents))\n\n\n# import sys\n#\n#\n# def run(n, tree):\n# heights = []\n# checked_indexes = {}\n# for item_index, parent_index in enumerate(tree):\n# height = 1\n# while parent_index != -1:\n# height += 1\n# parent_index = tree[parent_index]\n# calc_height = checked_indexes.get(parent_index, 0)\n# if calc_height:\n# height += calc_height\n# break\n# checked_indexes[item_index] = height\n# heights.append(height)\n# return max(heights)\n\n\nprint(run(n, tree))\nend = time.time()\nprint(end-start)","sub_path":"stepik/algorithms and data structures/2.2.py","file_name":"2.2.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"184697241","text":"try:\n from framework.main import ModuleBase\nexcept ImportError:\n pass\n\nclass UserAdd(ModuleBase):\n @property\n def tags(self):\n return ['IntrusionSet3']\n\n @property\n def needs_root(self):\n return True\n\n @property\n def relative_delay(self):\n return 55\n\n @property\n def absolute_duration(self):\n return 24 * 3600 # 1 day\n\n def do_run(self):\n import time\n from subprocess import check_call, PIPE\n username = '${USER_NAME}'\n cmd = 'useradd -m -c \"{1}\" -l -N -s /bin/false {0}'.format(username, self._banner.replace(':',''))\n try:\n check_call(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n self.hec_logger('Added a user', username=username)\n except 
Exception as e:\n self.hec_logger(str(e), severity='error')\n return\n time.sleep(self.absolute_duration)\n try:\n check_call('userdel -r {0}'.format(username), shell=True, stdout=PIPE, stderr=PIPE)\n self.hec_logger('Removed a user', username=username)\n except Exception as e:\n self.hec_logger(str(e), severity='error')\n\n def run(self):\n self.start()\n try:\n self.do_run()\n except Exception as e:\n self.hec_logger('Uncaught exception within module, exiting module gracefully', error=str(e),\n severity='error')\n self.finish()\n","sub_path":"framework/modules/useradd.py","file_name":"useradd.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"519326446","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 9 16:55:19 2021\n\n@author: chriskyee\n\"\"\"\n\nfrom scipy.io import loadmat\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sklearn.decomposition import PCA\nfrom tqdm import tqdm\n\n######## Reading in music features from musicFeatures.mat to df ########\nmf_raw = loadmat('/Users/chriskye 1/Desktop/ResFinal/Data/musicFeatures.mat')\nmf_raw = mf_raw['df']\n\nmusicFeatures_raw = np.zeros((40,33))\n\n##creating feature list\nfor i in range(40):\n for index in range(0,14):\n musicFeatures_raw[i,index] = mf_raw[0,i][index][0][0]\n for index2 in range(14,27):\n musicFeatures_raw[i,index2] = mf_raw[0,i][14][index2-14][0]\n for index3 in range(27,33):\n musicFeatures_raw[i,index3] = mf_raw[0,i][index-12][0][0]\nmusicFeatures_raw = np.delete(musicFeatures_raw, 6, 1)\n\nmusicFeatures = np.repeat(musicFeatures_raw, [60]*len(musicFeatures_raw), 0)\nmusicFeatures = np.tile(musicFeatures, (32,1))\n \nmusicFeatures = pd.DataFrame(musicFeatures,columns=['RMS', 'Fluctuation Peak',\n 'Fluctuation Centroid','Tempo','Pulse Clarity', 'Mean Attack Time',\n 'Zero Cross Rate', 'Spectral Centroid', 'Spectral Spread',\n 'Spectral Skewness', 'Spectral Kurtosis', 'Spectral Flatness', 'Spectral Entropy',\n 'MFCC1', 'MFCC2', 'MFCC3', 'MFCC4', 'MFCC5', 'MFCC6', 'MFCC7', 'MFCC8', 'MFCC9',\n 'MFCC10', 'MFCC11', 'MFCC12', 'MFCC13', 'Harmonic Change', 'Key Clarity',\n 'Majorness', 'Roughness', 'Chroma Std', 'Novelty'])\n\n######## Reading in EEG Features to df ########\ninfile = open('/Users/chriskye 1/Desktop/ResFinal/Data/channelPSD.dat', 'rb')\nef_raw = pickle.load(infile)\n\n## reshape to 2d\neegFeatures = ef_raw.transpose(2,0,1).reshape(76800, 256)\n\n## assign colnames & make into pd df\ncolnames = []\nfor i in range(32):\n for j in range(8):\n name = 'Chn' + str(i+1) + '_' + 'band' + str(j+1)\n colnames.append(name)\neegFeatures = pd.DataFrame(eegFeatures, columns=colnames)\n\n######## Feature Fusion ########\nfullFeatures = musicFeatures.join(eegFeatures)\n\noutfile = open('/Users/chriskye 1/Desktop/ResFinal/Data/fullFeatures.dat', 'wb')\npickle.dump(fullFeatures, outfile)\noutfile.close()\n\n######## Creating Categorial Lables ########\ndeap_folder = '/Users/chriskye 1/Desktop/DEAP/data_preprocessed_python/'\nfile_list_test = ['s01.dat', 's02.dat'] ## test directory\nfile_list = ['s01.dat', 's02.dat', 's03.dat', 's04.dat', 's05.dat', 's06.dat',\n 's07.dat', 's08.dat', 's09.dat', 's10.dat', 's11.dat', 's12.dat',\n 's13.dat', 's14.dat', 's15.dat', 's16.dat', 's17.dat', 's18.dat',\n 's19.dat', 's20.dat', 's21.dat', 's22.dat', 's23.dat', 's24.dat',\n 's25.dat', 's26.dat', 's27.dat', 's28.dat', 's29.dat', 's30.dat',\n 's31.dat', 's32.dat']\n\n## OG label 
list\nlabels_numerical = np.zeros((1,2))\nfor filename in tqdm(file_list):\n data = pickle.load(open(deap_folder + filename, 'rb'), encoding = 'bytes')\n label_raw = data[b'labels'][...,0:2]\n labels_numerical = np.append(labels_numerical, label_raw, axis=0)\nlabels_numerical = labels_numerical[1:,...]\n\n## Labels to categorical\nlabels_categorical = np.zeros((1280,2))\nfor row in range(1280):\n labels_categorical[row,0] = (labels_numerical[row,0] > 5)\n labels_categorical[row,1] = (labels_numerical[row,1] > 5)\n\nlabels_categorical = np.repeat(labels_categorical, [60]*len(labels_categorical), axis = 0)\n\nlabels_numerical = pd.DataFrame(labels_numerical, columns=['Valence','Arousal'])\nlabels_categorical = pd.DataFrame(labels_categorical, columns=['Valence','Arousal'])\n\noutfile = (open('/Users/chriskye 1/Desktop/ResFinal/Data/labels.dat', 'wb'))\npickle.dump(labels_categorical, outfile)\noutfile.close()\n\n\n\n\n\n\n ","sub_path":"Scripts/featureFusion.py","file_name":"featureFusion.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"641810927","text":"import torch.nn as nn\nimport torch\n\nclass HypertrophyClassifier(nn.Module):\n def __init__(self):\n super(HypertrophyClassifier, self).__init__()\n\n self.conv = nn.Conv2d(in_channels=3, out_channels=9, kernel_size=(3,3), stride=1) # (c=3, (224,224)) => (c=9, (222, 222))\n self.conv2 = nn.Conv2d(in_channels=9, out_channels=18, kernel_size=(3,3), stride=1) # (c=9, (222,222)) => (c=18, (220, 220))\n self.conv3 = nn.Conv2d(in_channels=18, out_channels=18, kernel_size=(2,2), stride=2) # (c=18, (220,220)) => (c=18, (110, 110))\n self.conv4 = nn.Conv2d(in_channels=18, out_channels=9, kernel_size=(2,2), stride=1, padding=1) # (c=9, (110,110)) => (c=9, (110, 110))\n self.conv5 = nn.Conv2d(in_channels=9, out_channels=6, kernel_size=(2,2), stride=1, padding=1) # (c=6, (110,110)) => (c=9, (110, 110))\n self.linear1 = nn.Linear(75264, 200)\n self.linear2 = nn.Linear(200, 3)\n\n self.relu = nn.ReLU()\n\n def forward(self, x):\n temp = self.relu(self.conv(x))\n temp = self.relu(self.conv2(temp))\n temp = self.relu(self.conv3(temp))\n temp = self.relu(self.conv4(temp))\n temp = self.relu(self.conv5(temp))\n temp = temp.view(-1, 75264)\n temp = self.relu(self.linear1(temp))\n temp = self.linear2(temp)\n return temp","sub_path":"hypertrophy_classifier.py","file_name":"hypertrophy_classifier.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"83573719","text":"from flask import render_template,request,redirect,url_for,abort\nfrom ..models import Comment,User,Pitch\n# We may also use the import * command to import all objects from a specific module e.g from ..models import *\n# ,get_pitch,get_comments\nfrom . import main\nfrom .forms import CommentForm, PitchForm,UpdateProfile\nfrom flask_login import login_required, current_user\nfrom .. 
import db,photos\nimport markdown2\n\n\ndef save_pitch(pitch):\n Pitch.save_pitch(pitch)\n\n@main.route('/')\n\ndef index():\n pitches = Pitch.query.order_by(Pitch.posted.desc()).all()\n '''\n my index page\n :return:\n '''\n return render_template('index.html', pitches=pitches )\n\n@main.route('/user/<uname>')\ndef profile(uname):\n user = User.query.filter_by(username = uname).first()\n\n if user is None:\n abort(404)\n\n return render_template(\"profile/profile.html\", user = user)\n\n\n@main.route('/user/<uname>/update',methods = ['GET','POST'])\n@login_required\ndef update_profile(uname):\n user = User.query.filter_by(username = uname).first()\n if user is None:\n abort(404)\n\n form = UpdateProfile()\n\n if form.validate_on_submit():\n user.bio = form.bio.data\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(url_for('.profile',uname=user.username))\n\n return render_template('profile/update.html',form =form)\n\n\n@main.route('/user/<uname>/update/pic',methods= ['POST'])\n@login_required\ndef update_pic(uname):\n user = User.query.filter_by(username = uname).first()\n if 'photo' in request.files:\n filename = photos.save(request.files['photo'])\n path = f'photos/{filename}'\n user.profile_pic_path = path\n db.session.commit()\n return redirect(url_for('main.profile',uname=uname))\n\n\n@main.route('/category/<category>')\n@login_required\n\ndef fetchcategory(category):\n\n '''\n View pitch page function that returns the pitch details page and its data\n '''\n pitch = Pitch.get_pitch(category)\n if request.args.get(\"vote\"):\n pitch.likes = pitch.likes + 1\n pitch.save_pitch()\n print(pitch)\n return render_template('pitch.html', category=category,pitch=pitch)\n\n@main.route('/comments/<id>')\n@login_required\ndef comment(id):\n comments =Comment.get_comments(id)\n print(comments)\n title = 'comments'\n return render_template('comments.html',comments = comments,title = title)\n\n@main.route('/comment/<pitches_id>', methods = ['GET', 'POST'])\n@login_required\ndef new_comment(pitches_id):\n pitches = Pitch.query.filter_by(id = pitches_id).first()\n form = CommentForm()\n\n if form.validate_on_submit():\n comment = form.comment.data\n\n new_comment = Comment(comment_content=comment,user_id=current_user.id, pitches_id=pitches_id)\n\n new_comment.save_comment()\n\n return redirect(url_for('main.index'))\n title='New Pitch'\n return render_template('new_comment.html',title=title,comment_form = form,pitches_id=pitches_id)\n\n@main.route('/pitch/', methods=['GET', 'POST'])\n@login_required\ndef pitch():\n form = PitchForm()\n print('working')\n if form.validate_on_submit():\n title = form.title.data\n content = form.content.data\n category=form.category.data\n\n # Updated comment instance\n new_pitch = Pitch( pitch_title=title,pitch_content=content,pitch_category=category,user_id=current_user.id)\n\n # save comment method\n new_pitch.save_pitch()\n return redirect(url_for('.single_pitch',pitch_id = new_pitch.id ))\n\n title = 'pitch'\n return render_template('new_pitch.html', pitch_form=form)\n\n\n\n@main.route('/pitch/<pitch_id>',methods=[\"GET\",\"POST\"])\n@login_required\n\ndef single_pitch(pitch_id):\n pitches = Pitch.query.filter_by(id=pitch_id).one()\n\n comments=Comment.get_comments(pitch_id)\n\n\n form =CommentForm()\n if form.validate_on_submit():\n comment=form.comment.data\n\n\n new_comment = Comment(comment_content=comment,user_id=current_user.id, pitch_id=pitch_id)\n\n db.session.add(new_comment)\n db.session.commit()\n return redirect(url_for('main.pitch_comments', pitch_id=pitches.id))\n\n\n # new_comment.save_comment()\n\n # return redirect(url_for('.view_pitch', id=pitches.id, comments=comments))\n\n return render_template('added_pitch.html',pitch = pitches,form=form, comments=comments)\n\n
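# Note: url_for('main.single_pitch', pitch_id=3) builds '/pitch/3' because the\n# <pitch_id> converter in the route maps that URL segment to the view argument.\n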
@main.route('/pitch_comments/<pitch_id>' ,methods=['GET', 'POST'])\n@login_required\n\ndef pitch_comments(pitch_id):\n\n pitch = Pitch.query.filter_by(id=pitch_id).one()\n # comments=Comment.get_comments(pitch_id)\n comments=Comment.get_comments(pitch_id)\n\n\n return render_template('pitch_comments.html', pitch=pitch, comments=comments, pitch_id=pitch.id)\n\n\n\n@main.route(\"/view/<id>\", methods=[\"GET\",\"POST\"])\n@login_required\n\ndef view_pitch(id):\n pitch = Pitch.query.get(id)\n if request.args.get(\"vote\"):\n pitch.likes = pitch.likes + 1\n pitch.save_pitch()\n return redirect(\"/view/{pitch_id}\".format(pitch_id=id))\n return render_template('view_pitch.html',pitch = pitch, comment=comment)\n\n\n\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"554799229","text":"from flask import request, url_for, jsonify\nfrom flask_api import FlaskAPI, status, exceptions\nfrom pymongo import MongoClient\n\n\napp = FlaskAPI(__name__)\n# Home page\n@app.route(\"/\", methods=['GET'])\ndef list():\n mongo_uri = \"mongodb://mongo-router:27017\"\n\n client = MongoClient(mongo_uri)\n db = client.tarea\n collection = db.usuarios\n\n cursor = collection.find()\n\n notes = []\n\n for note in cursor:\n # Added to be able to handle the ObjectID\n note['_id'] = str(note['_id'])\n notes.append(note)\n\n return notes\n# Dispositivos page\n# Shows the countries that have devices of the Sony brand\n@app.route(\"/dispositivos\", methods=['GET'])\ndef list_dispositivos():\n mongo_uri = \"mongodb://mongo-router:27017\"\n\n client = MongoClient(mongo_uri)\n db = client.tarea\n collection = db.dispositivos\n\n pipeline = [{\"$match\":{\"marca\":\"Sony\"}},{\"$project\":{\"marca\":1,\"pais\":1, \"_id\":0}}, {\"$sort\":{\"_id\":1}}]\n\n cursor = collection.aggregate(pipeline)\n\n return cursor\n# Direccion page\n# Returns the number of devices in Mexico\n# $count is equivalent to a $group + $project\n@app.route(\"/direccion\", methods=['GET'])\ndef list_direcciones():\n mongo_uri = \"mongodb://mongo-router:27017\"\n\n client = MongoClient(mongo_uri)\n db = client.tarea\n collection = db.direcciones\n\n pipeline = [{\"$match\": {\"ubicacion\" :\"Mexico\"}}, {\"$count\": \"ubicacion\"}]\n\n cursor = collection.aggregate(pipeline)\n\n return cursor\n\n@app.route(\"/usuarios\", methods=['GET'])\ndef list_usuarios():\n mongo_uri = \"mongodb://mongo-router:27017\"\n\n client = MongoClient(mongo_uri)\n db = client.tarea\n collection = db.usuarios\n\n pipeline = [{\"$match\":{\"genero\":\"female\", \"direccion_id\":{\"$gte\": 1000}}}, {\"$project\":{\"nombre\":1} }, {\"$sort\": {\"_id\":1}}]\n\n cursor = collection.aggregate(pipeline)\n\n return cursor\n\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True)\n","sub_path":"flask-mongo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"182874333","text":"\"\"\"\r\n\r\n General characters\r\n\r\n Character Meaning\r\n\r\n . Matches any single character (not including the newline \\n)\r\n \\ Escape character (turns a character with a special meaning into its literal meaning)\r\n[...] Character set. Matches any character in the set\r\n
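\r\n For instance, re.findall('a[bcd]', 'ab ac ae') returns ['ab', 'ac'].\r\n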
字符集。对应字符集中的任意字符\r\n\r\n\".\" 字符为匹配任意单个字符,例如,a.b可以的匹配结果为abd、aic、a&c等,但不包含换行符\r\n\"\\\" 字符为转义字符,可以把字符改变为原来的意思。例如\".\"字符,是匹配任意单个字符,但有时不需要这个功能,只想让它代表一个点,,这时\r\n 就可以使用\"\\.\",就能匹配为\".\"了\r\n\"[...]\" 为字符集,相当于在中括号中任选一个。例如a[bcd],匹配的结果为ab、ac、ad。\r\n\r\n\r\n 预定义字符\r\n\r\n字符 含 义\r\n \\d 匹配一个数字字符。等价于[0-9]\r\n \\D 匹配一个非数字字符。等价于[^0-9]\r\n \\s 匹配任何空白字符,包括空格、制表符、换页符等。等价于[\\f\\n\\r\\t\\v]\r\n \\S 匹配任何非空白字符。等价于[^\\f\\n\\r\\t\\v]\r\n \\w 匹配包括下划线的任何单词字符。等价于[A-Za-z0-9_]\r\n \\W 匹配任何非单词字符。等价于[^A-Za-z0-9_]\r\n\r\n\r\n 数量词\r\n\r\n数量词 含 义\r\n * 匹配前一个字符0或无限次\r\n + 匹配前一个字符1或无限次\r\n ? 匹配前一个字符0或1次\r\n {m} 匹配前一个字符m次\r\n{m, n} 匹配前一个字符m至n次\r\n\r\n\"*\" 数量词匹配前一个字符0或无限次,例如,ab*c匹配ac、abc、abbc 和 abbbc 等。\r\n\"+\" \"+\" 与 \"*\" 很类似,只是至少匹配前一个字符一次。例如,ab+c匹配abc、abbc 和 abbbc 等。\r\n\"?\" 数量词匹配前一个字符0或1次。例如,ab?c匹配ac 和 abc。\r\n{m} 数量词匹配前一个字符m次。例如,ab{3}匹配abbbc。\r\n{m, n} 数量词匹配前一个字符m至n次。例如,ab{1,3}匹配abc、abbc、abbbc。\r\n\r\n\r\n 边界匹配\r\n\r\n边界匹配 含 义\r\n ^ 匹配字符串开头\r\n $ 匹配字符串结尾\r\n \\A 仅匹配字符串开头\r\n \\Z 仅匹配字符串结尾\r\n\r\n\"^\" 匹配字符串的开头,例如,^abc匹配abc开头的字符串\r\n\"$\" 匹配字符串的结尾,例如,abc$匹配abc结尾的字符串\r\n\"\\A\" 仅匹配字符串的开头,例如,\\Aabc\r\n\"\\Z\" 仅匹配字符串的结尾,例如,abc\\Z\r\n\r\n\"\"\"\r\n\r\nimport re\r\n\r\na = \"ssIssfddfgssLovessfdfdsfedsfssPythonss\"\r\n\r\ninfos = re.findall(\"ss(.*?)ss\", a)\r\n\r\nprint(infos)\r\n\r\n\"\"\"\r\n\r\nsearch() 匹配并提取第一个符合规律的内容,返回一个正则表达式对象\r\n\r\nre.match(pattern, string, flags=0)\r\n\r\npattern 为匹配的正则表达式\r\nstring 为要匹配的字符串\r\nflags 为标志符,用于控制正则表达式的匹配方式,如是否区分大小写,多行匹配等\r\n\r\n\"\"\"\r\na = \"one1two2three3\"\r\n\r\ninfos = re.match(\"\\d+\", a)\r\n\r\nprint(infos)\r\n\r\ninfos = re.search(\"\\d+\", a)\r\n\r\nprint(infos)\r\n\r\nprint(infos.group())\r\n\r\n\"\"\"\r\n\r\nsub() 用于替换字符串中的匹配项\r\n\r\nre.sub(pattern, repl, string, count=0, flags=0)\r\n\r\npattern 为匹配的正则表达式\r\nrepl 为替换的字符串\r\nstring 为要被查找替换的原始字符串\r\ncounts 为模式匹配后替换的最大次数,默认 0 表示替换所有的匹配\r\nflags 为标志符,用于控制正则表达式的匹配方式,如是否区分大小写,多行匹配等\r\n\r\n\"\"\"\r\n\r\nphone = \"123-1234-5678\"\r\n\r\nnew_phone = re.sub(\"\\D\", \"\", phone)\r\n\r\nprint(new_phone)\r\n\r\n\"\"\"\r\n\r\nfindall() 匹配所有符合规律的内容,并以列表的形式返回结果\r\n\r\n\"\"\"\r\n\r\na = \"one1two2three3\"\r\n\r\ninfos = re.findall(\"\\d+\", a)\r\n\r\nprint(infos)\r\n\r\n\"\"\"\r\n\r\n re模块修饰符\r\n \r\n修饰符 描 述\r\nre.I 使匹配对大小写不敏感\r\nre.L 做本地化识别(locale-aware)匹配\r\nre.M 多行匹配,影响 ^ 和 $\r\nre.S 使匹配包括换行在内的所有字符\r\nre.U 根据Unicode字符集解析字符。这个标志影响\\w \\W \\b \\B\r\nre.X 该标志通过给予更灵活的格式,以便将正则表达式写的更易理解\r\n\r\n\"\"\"\r\n\r\na = '
<div>指数</div>'\r\n\r\nword = re.findall('<div>(.*?)</div>', a)\r\n\r\nprint(word)\r\n\r\na = '''<div>指数\r\n</div>'''\r\n\r\nword = re.findall('<div>(.*?)</div>
', a, re.S)\r\n\r\nprint(word)\r\n\r\nprint(word[0].strip()) # 使用strip() 方法去除换行\r\n","sub_path":"zz_正则表达式.py","file_name":"zz_正则表达式.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"152398347","text":"def add_time(start, duration, startingDay = None):\n\n splitList = start.split(':') # Aux list to store start time in tuple, time separated by hours and minutes + period\n\n startInfo = (splitList[0], splitList[1].split()[0], splitList[1].split()[1]) # Tuple with (hours,minutes,period)\n # print(startInfo)\n addInfo = tuple(duration.split(':')) # Tuple with (hours, minutes) to add to the start time\n\n\n # Stores ints corresponding to starting time, accounts for both periods of the day\n if 'AM' in startInfo:\n if int(startInfo[0]) == 12:\n startHours = 0 # 12 AM corresponds to 0 total starting hours\n startMinutes = int(startInfo[1])\n else:\n startHours = int(startInfo[0])\n startMinutes = int(startInfo[1])\n elif 'PM' in startInfo:\n if int(startInfo[0]) == 12:\n startHours = 12 # 12 PM corresponds to 12 total starting hours\n startMinutes = int(startInfo[1])\n else:\n startHours = int(startInfo[0]) + 12 # Sum 12 to the total to account for the past first half of the day\n startMinutes = int(startInfo[1])\n # print(startHours,startMinutes)\n\n # Stores ints for total hours and total minutes separately for later addition\n addHours = int(addInfo[0])\n addMinutes = int(addInfo[1])\n\n # print(addHours,addMinutes)\n\n # Calculate total hours and total minutes separately, without converting extra minutes yet\n\n totalHours = startHours + addHours\n totalMinutes = startMinutes + addMinutes\n\n #print(totalHours,totalMinutes)\n\n # Calculate remainder telling us the final minutes to be displayed after conversion of minutes over 59 to hours\n finalMinutes = totalMinutes % 60\n\n # Pad final minutes to be displayed with a 0 for when they're less than 10\n finalMinutesStr = str(finalMinutes).zfill(2)\n\n # Calculate no. of hours to add from minutes over 59, add to total hours\n finalTotalHours = totalMinutes // 60\n finalTotalHours += totalHours\n\n #print(finalTotalHours,finalMinutes)\n\n # Calculate no. of days past after duration is added to the starting hour\n finalHours24 = finalTotalHours % 24\n\n extraDays = finalTotalHours // 24\n #print(extraDays)\n #print(finalHours24,extraDays)\n\n # Convert hours to 12 hour format\n if finalHours24 < 12:\n if finalHours24 == 0:\n finalHours12 = '12' #\n else:\n finalHours12 = str(finalHours24)\n dayHalf = 'AM'\n else:\n if finalHours24 == 12:\n finalHours12 = '12'\n else:\n finalHours12 = str(finalHours24 - 12)\n dayHalf = 'PM'\n\n # Check if day is the same, prepare part of final string, empty if the day is the same.\n if extraDays == 0:\n printExtraDays = ''\n elif extraDays == 1:\n printExtraDays = ' (next day)'\n else:\n printExtraDays = f' ({extraDays} days later)'\n\n\n # Tuple containing str with resulting day of the week in case starting day was provided, and an empty string otherwise\n weekDays = (', Monday',', Tuesday',', Wednesday',', Thursday',', Friday',', Saturday',', Sunday','')\n\n dayIndex = 7 # Case where starting day was not provided\n\n # Calculates index by using remainder of division by 7 to know the day of the week. 
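    # (Illustrative aside, a minimal sketch of the same modulo trick with
    # hypothetical names -- Monday mapped to index 0, as in weekDays above:
    #   days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
    #   start_index = 4             # starting on a Friday
    #   extra_days = 10             # duration pushed the result 10 days ahead
    #   days[(start_index + extra_days) % 7]   # -> 'Monday'
    # The same wrap-around keeps the index in range however many weeks pass.)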
Necessary in case duration is longer than a week.\n # 0 is Monday, 1 is Tuesday until 6 - Sunday\n # Final index goes from 0-6\n if startingDay is not None:\n if startingDay.lower() == 'monday':\n dayIndex = ((0 + extraDays) % 7)\n elif startingDay.lower() == 'tuesday':\n dayIndex = ((1 + extraDays) % 7)\n elif startingDay.lower() == 'wednesday':\n dayIndex = ((2 + extraDays) % 7)\n elif startingDay.lower() == 'thursday':\n dayIndex = ((3 + extraDays) % 7)\n elif startingDay.lower() == 'friday':\n dayIndex = ((4 + extraDays) % 7)\n elif startingDay.lower() == 'saturday':\n dayIndex = ((5 + extraDays) % 7)\n elif startingDay.lower() == 'sunday':\n dayIndex = ((6 + extraDays) % 7)\n # print(dayIndex)\n weekDay = weekDays[dayIndex] # Assigns correct week day of result or nothing if starting day was not provided\n\n # Format final string to be returned\n new_time = f'{finalHours12}:{finalMinutesStr} {dayHalf}' + weekDay + printExtraDays\n\n return new_time\n\n\n # return new_time\n","sub_path":"time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"450551404","text":"#!/usr/bin/env pyformex --gui\n# $Id$\n##\n## This file is part of pyFormex 0.7.2 Release Tue Sep 23 16:18:43 2008\n## pyFormex is a Python implementation of Formex algebra\n## Website: http://pyformex.berlios.de/\n## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be) \n##\n## This program is distributed under the GNU General Public License\n## version 2 or later (see file COPYING for details)\n##\n\"\"\"Lamella Dome\n\nlevel = 'beginner'\ntopics = ['geometry','domes']\ntechniques = ['colors']\n\n\"\"\"\n\nclear()\nnx=12 # number of modules in circumferential direction\nny=8 # number of modules in meridional direction\nrd=100 # radius of the sphere cap\nt=50 # slope of the dome at its base (= half angle of the sphere cap)\na=2 # size of the top opening\nrings=False # set to True to include horizontal rings\ne1 = Formex([[[0,0],[1,1]]],1).rosette(4,90).translate([1,1,0]) # diagonals\ne2 = Formex([[[0,0],[2,0]]],0) # border\nf1 = e1.replic2(nx,ny,2,2)\nif rings:\n f2 = e2.replic2(nx,ny+1,2,2)\nelse:\n f2 = e2.replic2(nx,2,2,2*ny)\ng = (f1+f2).translate([0,a,1]).spherical(scale=[180/nx,t/(2*ny+a),rd],colat=True)\ndraw(e1+e2)\n\ndraw(f1+f2)\n\nclear()\ndraw(g)\n","sub_path":"tags/release-0.7.2/pyformex/examples/Lamella.py","file_name":"Lamella.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"230874794","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\nfrom flask_migrate import Migrate\nfrom datetime import datetime\nfrom sqlalchemy.sql import func\nimport sys\n\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 
'postgresql://alanzhihaolu@localhost:5432/fyyurapp'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.debug = True\nmoment = Moment(app)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\n# TODO: connect to a local postgresql database\n\nmigrate = Migrate(app, db)\n\n#----------------------------------------------------------------------------#\n# Models.\n#----------------------------------------------------------------------------#\n\nclass Venue(db.Model):\n __tablename__ = 'venue'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n city = db.Column(db.String(120))\n state = db.Column(db.String(120))\n address = db.Column(db.String(120))\n phone = db.Column(db.String(120))\n genres = db.Column(db.ARRAY(db.String))\n image_link = db.Column(db.String(500))\n facebook_link = db.Column(db.String(120))\n seeking_talent = db.Column(db.Boolean)\n seeking_description = db.Column(db.String(200), nullable=True)\n website = db.Column(db.String(120))\n shows = db.relationship('Show', backref='venue', lazy=True)\n def get_venue(self):\n return {\n \"id\": self.id,\n 'name': self.name,\n 'num_upcoming_shows': Show.query.filter(Show.start_time > datetime.now()).filter(Show.venue_id==self.id).count()\n }\n # children = db.relationship(\"Show\", back_populates=\"parent\")\n\n # TODO: implement any missing fields, as a database migration using Flask-Migrate\n\nclass Artist(db.Model):\n __tablename__ = 'artist'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n city = db.Column(db.String(120))\n state = db.Column(db.String(120))\n phone = db.Column(db.String(120))\n genres = db.Column(db.ARRAY(db.String))\n image_link = db.Column(db.String(500))\n facebook_link = db.Column(db.String(120))\n seeking_venue = db.Column(db.Boolean)\n seeking_description = db.Column(db.String(200), nullable=True)\n website = db.Column(db.String(120))\n shows = db.relationship('Show', backref='artist', lazy=True)\n # parents = db.relationship(\"Show\", back_populates=\"child\")\n \n # TODO: implement any missing fields, as a database migration using Flask-Migrate\n\n# TODO Implement Show and Artist models, and complete all model relationships and properties, as a database migration.\n\nclass Show(db.Model):\n __tablename__ = \"show\"\n id = db.Column(db.Integer, primary_key=True)\n start_time = db.Column(db.DateTime)\n artist_id = db.Column(db.Integer(), db.ForeignKey('artist.id'), nullable=False) \n venue_id = db.Column(db.Integer(), db.ForeignKey('venue.id'), nullable=False)\n def get_artistInfo(self):\n artistInfo = Artist.query.filter_by(id=self.artist_id).first()\n artist_name = artistInfo.name\n artist_image_link = artistInfo.image_link\n return {\n 'artist_id': self.artist_id,\n 'artist_name': artist_name,\n 'artist_image_link': artist_image_link,\n 'start_time' : self.start_time\n }\n def get_venueInfo(self):\n venueInfo = Venue.query.filter_by(id=self.venue_id).first()\n venue_name = venueInfo.name\n venue_image_link = venueInfo.image_link\n return {\n 'venue_id': self.venue_id,\n 'venue_name': venue_name,\n 'venue_image_link': venue_image_link,\n 'start_time' : self.start_time\n }\n # child = db.relationship(\"Artist\", back_populates=\"parents\")\n # parent = db.relationship(\"Venue\", back_populates=\"children\")\n\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n\ndef format_datetime(value, format='medium'):\n date = 
dateutil.parser.parse(value)\n if format == 'full':\n format=\"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format=\"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format)\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n# @app.route('/venues')\n# def venues():\n# data = Venue.query.order_by(Venue.id).all()\n# for i in data:\n# currentID = i.id\n# upcoming_shows = Show.query.filter(Show.start_time > datetime.now()).filter(Show.venue_id==currentID).count()\n# i.num_upcoming_shows = upcoming_shows\n# return render_template('pages/venues.html', areas=data)\n\n@app.route('/venues')\ndef venues():\n areas = Venue.query.distinct('city','state').all()\n data = []\n for area in areas:\n venues = Venue.query.filter(Venue.city == area.city, Venue.state == area.state).all()\n record = {\n 'city': area.city,\n 'state': area.state,\n 'venues': [venue.get_venue() for venue in venues],\n }\n data.append(record)\n return render_template('pages/venues.html', areas=data)\n\n\n# @app.route('/venues')\n# def venues():\n# # TODO: replace with real venues data.\n# # num_shows should be aggregated based on number of upcoming shows per venue.\n# data=[{\n# \"city\": \"San Francisco\",\n# \"state\": \"CA\",\n# \"venues\": [{\n# \"id\": 1,\n# \"name\": \"The Musical Hop\",\n# \"num_upcoming_shows\": 0,\n# }, {\n# \"id\": 3,\n# \"name\": \"Park Square Live Music & Coffee\",\n# \"num_upcoming_shows\": 1,\n# }]\n# }, {\n# \"city\": \"New York\",\n# \"state\": \"NY\",\n# \"venues\": [{\n# \"id\": 2,\n# \"name\": \"The Dueling Pianos Bar\",\n# \"num_upcoming_shows\": 0,\n# }]\n# }]\n# return render_template('pages/venues.html', areas=data)\n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n search_term=request.form.get('search_term', '')\n response = {\n 'data': Venue.query.filter(Venue.name.ilike(f'%{search_term}%')).all()\n }\n response['count'] = len(response['data'])\n return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n# @app.route('/venues/search', methods=['POST'])\n# def search_venues():\n# # TODO: implement search on artists with partial string search. 
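# (Aside: a minimal sketch of such a filter, assuming the SQLAlchemy `Venue`
# model defined above -- `ilike` emits a SQL ILIKE, which is case-insensitive:
#   term = request.form.get('search_term', '')
#   matches = Venue.query.filter(Venue.name.ilike(f'%{term}%')).all()
# The live `search_venues` route above takes exactly this approach.)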
Ensure it is case-insensitive.\n# # seach for Hop should return \"The Musical Hop\".\n# # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n# response={\n# \"count\": 1,\n# \"data\": [{\n# \"id\": 2,\n# \"name\": \"The Dueling Pianos Bar\",\n# \"num_upcoming_shows\": 0,\n# }]\n# }\n# return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/venues/')\ndef show_venue(venue_id):\n venueData = Venue.query.filter_by(id = venue_id).all()\n venueData = venueData[0]\n data = {\n \"id\": venueData.id,\n \"name\": venueData.name,\n \"city\": venueData.city,\n \"state\": venueData.state,\n \"address\": venueData.address,\n \"phone\": venueData.phone,\n \"genres\": venueData.genres,\n \"image_link\": venueData.image_link,\n \"facebook_link\": venueData.facebook_link,\n \"seeking_talent\": venueData.seeking_talent,\n \"seeking_description\": venueData.seeking_description,\n \"website\": venueData.website\n }\n past_shows = Show.query.filter(Show.start_time < datetime.now()).filter(Show.venue_id==venue_id).all()\n data['past_shows'] = [show.get_artistInfo() for show in past_shows]\n upcoming_shows = Show.query.filter(Show.start_time > datetime.now()).filter(Show.venue_id==venue_id).all()\n data['upcoming_shows'] = [show.get_artistInfo() for show in upcoming_shows]\n data['past_shows_count'] = len(past_shows)\n data['upcoming_shows_count'] = len(upcoming_shows)\n return render_template('pages/show_venue.html', venue=data)\n\n# @app.route('/venues/')\n# def show_venue(venue_id):\n# # shows the venue page with the given venue_id\n# # TODO: replace with real venue data from the venues table, using venue_id\n# data1={\n# \"id\": 1,\n# \"name\": \"The Musical Hop\",\n# \"genres\": [\"Jazz\", \"Reggae\", \"Swing\", \"Classical\", \"Folk\"],\n# \"address\": \"1015 Folsom Street\",\n# \"city\": \"San Francisco\",\n# \"state\": \"CA\",\n# \"phone\": \"123-123-1234\",\n# \"website\": \"https://www.themusicalhop.com\",\n# \"facebook_link\": \"https://www.facebook.com/TheMusicalHop\",\n# \"seeking_talent\": True,\n# \"seeking_description\": \"We are on the lookout for a local artist to play every two weeks. 
Please call us.\",\n# \"image_link\": \"https://images.unsplash.com/photo-1543900694-133f37abaaa5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=400&q=60\",\n# \"past_shows\": [{\n# \"artist_id\": 4,\n# \"artist_name\": \"Guns N Petals\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1549213783-8284d0336c4f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=300&q=80\",\n# \"start_time\": \"2019-05-21T21:30:00.000Z\"\n# }],\n# \"upcoming_shows\": [],\n# \"past_shows_count\": 1,\n# \"upcoming_shows_count\": 0,\n# }\n# data2={\n# \"id\": 2,\n# \"name\": \"The Dueling Pianos Bar\",\n# \"genres\": [\"Classical\", \"R&B\", \"Hip-Hop\"],\n# \"address\": \"335 Delancey Street\",\n# \"city\": \"New York\",\n# \"state\": \"NY\",\n# \"phone\": \"914-003-1132\",\n# \"website\": \"https://www.theduelingpianos.com\",\n# \"facebook_link\": \"https://www.facebook.com/theduelingpianos\",\n# \"seeking_talent\": False,\n# \"image_link\": \"https://images.unsplash.com/photo-1497032205916-ac775f0649ae?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=750&q=80\",\n# \"past_shows\": [],\n# \"upcoming_shows\": [],\n# \"past_shows_count\": 0,\n# \"upcoming_shows_count\": 0,\n# }\n# data3={\n# \"id\": 3,\n# \"name\": \"Park Square Live Music & Coffee\",\n# \"genres\": [\"Rock n Roll\", \"Jazz\", \"Classical\", \"Folk\"],\n# \"address\": \"34 Whiskey Moore Ave\",\n# \"city\": \"San Francisco\",\n# \"state\": \"CA\",\n# \"phone\": \"415-000-1234\",\n# \"website\": \"https://www.parksquarelivemusicandcoffee.com\",\n# \"facebook_link\": \"https://www.facebook.com/ParkSquareLiveMusicAndCoffee\",\n# \"seeking_talent\": False,\n# \"image_link\": \"https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80\",\n# \"past_shows\": [{\n# \"artist_id\": 5,\n# \"artist_name\": \"Matt Quevedo\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1495223153807-b916f75de8c5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=334&q=80\",\n# \"start_time\": \"2019-06-15T23:00:00.000Z\"\n# }],\n# \"upcoming_shows\": [{\n# \"artist_id\": 6,\n# \"artist_name\": \"The Wild Sax Band\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1558369981-f9ca78462e61?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=794&q=80\",\n# \"start_time\": \"2035-04-01T20:00:00.000Z\"\n# }, {\n# \"artist_id\": 6,\n# \"artist_name\": \"The Wild Sax Band\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1558369981-f9ca78462e61?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=794&q=80\",\n# \"start_time\": \"2035-04-08T20:00:00.000Z\"\n# }, {\n# \"artist_id\": 6,\n# \"artist_name\": \"The Wild Sax Band\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1558369981-f9ca78462e61?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=794&q=80\",\n# \"start_time\": \"2035-04-15T20:00:00.000Z\"\n# }],\n# \"past_shows_count\": 1,\n# \"upcoming_shows_count\": 1,\n# }\n# data = list(filter(lambda d: d['id'] == venue_id, [data1, data2, data3]))[0]\n# return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n form = VenueForm(request.form)\n return render_template('forms/new_venue.html', form=form)\n\n# @app.route('/venues/create', methods=['POST'])\n# def 
create_venue_submission():\n# form = VenueForm(request.form, meta={'csrf': False})\n# name=form.name.data,\n# city=form.city.data,\n# state=form.state.data,\n# address=form.address.data,\n# phone=form.phone.data,\n# facebook_link=form.facebook_link.data,\n# image_link=form.image_link.data,\n# website=form.website.data,\n# seeking_talent=form.seeking_talent.data,\n# seeking_description=form.seeking_description.data\n# if form.validate():\n# try:\n# venue = Venue(\n# name=name,\n# city=city,\n# state=state,\n# address=address,\n# phone=phone,\n# genres=request.form.getlist('genres'),\n# facebook_link=facebook_link,\n# image_link=image_link,\n# website=website,\n# seeking_talent=seeking_talent,\n# seeking_description=seeking_description\n# )\n# db.session.add(venue)\n# db.session.commit()\n# flash('Venue ' + form.name.data + ' was successfully listed!')\n# except ValueError as e:\n# print(e)\n# db.session.rollback()\n# flash('An error occurred. Venue ' + form.name.data + ' could not be listed.')\n# finally:\n# db.session.close()\n# else:\n# message = []\n# for field, errors in form.errors.items():\n# message.append(field + ': (' + '|'.join(errors) + ')')\n# return render_template('pages/home.html')\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n error = False\n data = request.form\n vname = data['name']\n vcity = data['city']\n vstate = data['state']\n vaddress = data['address']\n vphone = data['phone']\n vgenres = request.form.getlist('genres')\n vfb_link = data['facebook_link']\n vimage_link = data['image_link']\n vwebsite = data['website']\n if data['seeking_talent'] == 'True':\n vseeking_talent = True\n else:\n vseeking_talent = False\n vseeking_description = data['seeking_description']\n try:\n db.session.add(Venue(\n city=vcity,\n state=vstate,\n name=vname,\n address=vaddress,\n phone=vphone,\n facebook_link=vfb_link,\n genres=vgenres,\n seeking_talent=vseeking_talent,\n seeking_description=vseeking_description,\n website=vwebsite,\n image_link=vimage_link\n ))\n except:\n error = True\n finally:\n if not error:\n db.session.commit()\n flash('Venue ' + request.form['name'] +\n ' was successfully listed!')\n else:\n flash('An error occurred. Venue ' +\n vname + ' could not be listed.')\n db.session.rollback()\n return render_template('pages/home.html')\n\n# @app.route('/venues/create', methods=['POST'])\n# def create_venue_submission():\n# # TODO: insert form data as a new Venue record in the db, instead\n# # TODO: modify data to be the data object returned from db insertion\n\n# # on successful db insert, flash success\n# flash('Venue ' + request.form['name'] + ' was successfully listed!')\n# # TODO: on unsuccessful db insert, flash an error instead.\n# # e.g., flash('An error occurred. Venue ' + data.name + ' could not be listed.')\n# # see: http://flask.pocoo.org/docs/1.0/patterns/flashing/\n# return render_template('pages/home.html')\n\n@app.route('/venues/', methods=['DELETE'])\ndef delete_venue(venue_id):\n try:\n Show.query.filter_by(venue_id = venue_id).delete()\n Venue.query.filter_by(id = venue_id).delete()\n db.session.commit()\n except:\n db.session.rollback()\n finally:\n db.session.close()\n return None\n\n# @app.route('/venues/', methods=['DELETE'])\n# def delete_venue(venue_id):\n# # TODO: Complete this endpoint for taking a venue_id, and using\n# # SQLAlchemy ORM to delete a record. 
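# (Aside: a hedged sketch of the delete-with-rollback pattern this TODO asks
# for, mirroring the live `delete_venue` above; all names are from this file:
#   try:
#       Show.query.filter_by(venue_id=venue_id).delete()
#       Venue.query.filter_by(id=venue_id).delete()
#       db.session.commit()
#   except:
#       db.session.rollback()
#   finally:
#       db.session.close()
# Redirecting to the homepage after a successful delete would cover the bonus.)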
Handle cases where the session commit could fail.\n\n# # BONUS CHALLENGE: Implement a button to delete a Venue on a Venue Page, have it so that\n# # clicking that button delete it from the db then redirect the user to the homepage\n# return None\n\n# Artists\n# ----------------------------------------------------------------\n# @app.route('/artists')\n# def artists():\n# # TODO: replace with real data returned from querying the database\n# data=[{\n# \"id\": 4,\n# \"name\": \"Guns N Petals\",\n# }, {\n# \"id\": 5,\n# \"name\": \"Matt Quevedo\",\n# }, {\n# \"id\": 6,\n# \"name\": \"The Wild Sax Band\",\n# }]\n# return render_template('pages/artists.html', artists=data)\n\n@app.route('/artists')\ndef artists():\n artistInfo = Artist.query.order_by(Artist.id).all()\n data = []\n for artist in artistInfo:\n record = {\n 'id': artist.id,\n 'name': artist.name\n }\n data.append(record)\n return render_template('pages/artists.html', artists=data)\n\n\n# @app.route('/artists')\n# def artists():\n# data = Artist.query.order_by(Artist.id).all()\n# for i in data:\n# currentID = i.id\n# upcoming_shows = Show.query.filter(Show.start_time > datetime.now()).filter(Show.artist_id==currentID).count()\n# i.num_upcoming_shows = upcoming_shows\n# return render_template('pages/artists.html', artists=data)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n search_term=request.form.get('search_term', '')\n response = {\n 'data': Artist.query.filter(Artist.name.ilike(f'%{search_term}%')).all()\n }\n response['count'] = len(response['data'])\n return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n# @app.route('/artists/search', methods=['POST'])\n# def search_artists():\n# # TODO: implement search on artists with partial string search. 
Ensure it is case-insensitive.\n# # seach for \"A\" should return \"Guns N Petals\", \"Matt Quevado\", and \"The Wild Sax Band\".\n# # search for \"band\" should return \"The Wild Sax Band\".\n# response={\n# \"count\": 1,\n# \"data\": [{\n# \"id\": 4,\n# \"name\": \"Guns N Petals\",\n# \"num_upcoming_shows\": 0,\n# }]\n# }\n# return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/artists/')\ndef show_artist(artist_id):\n artistData = Artist.query.filter_by(id = artist_id).all()\n artistData = artistData[0]\n data = {\n 'id': artistData.id,\n 'name': artistData.name,\n 'genres': ''.join(list(filter(lambda x : x!= '{' and x!='}', artistData.genres ))).split(','),\n 'city': artistData.city,\n 'state': artistData.state,\n 'phone': artistData.phone,\n 'website': artistData.website,\n 'facebook_link': artistData.facebook_link,\n 'seeking_venue': artistData.seeking_venue,\n 'seeking_description': artistData.seeking_description,\n 'image_link': artistData.image_link\n }\n past_shows = Show.query.filter(pShow.start_time < datetime.now()).filter(Show.artist_id==artist_id).all()\n data['past_shows'] = [show.get_venueInfo() for show in past_shows]\n upcoming_shows = Show.query.filter(Show.start_time > datetime.now()).filter(Show.artist_id==artist_id).all()\n data['upcoming_shows'] = [show.get_venueInfo() for show in upcoming_shows]\n data['past_shows_count'] = len(data['past_shows'])\n data['upcoming_shows_count'] = len(data['upcoming_shows'])\n return render_template('pages/show_artist.html', artist=data)\n\n# @app.route('/artists/')\n# def show_artist(artist_id):\n# # shows the venue page with the given venue_id\n# # TODO: replace with real venue data from the venues table, using venue_id\n# data1={\n# \"id\": 4,\n# \"name\": \"Guns N Petals\",\n# \"genres\": [\"Rock n Roll\"],\n# \"city\": \"San Francisco\",\n# \"state\": \"CA\",\n# \"phone\": \"326-123-5000\",\n# \"website\": \"https://www.gunsnpetalsband.com\",\n# \"facebook_link\": \"https://www.facebook.com/GunsNPetals\",\n# \"seeking_venue\": True,\n# \"seeking_description\": \"Looking for shows to perform at in the San Francisco Bay Area!\",\n# \"image_link\": \"https://images.unsplash.com/photo-1549213783-8284d0336c4f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=300&q=80\",\n# \"past_shows\": [{\n# \"venue_id\": 1,\n# \"venue_name\": \"The Musical Hop\",\n# \"venue_image_link\": \"https://images.unsplash.com/photo-1543900694-133f37abaaa5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=400&q=60\",\n# \"start_time\": \"2019-05-21T21:30:00.000Z\"\n# }],\n# \"upcoming_shows\": [],\n# \"past_shows_count\": 1,\n# \"upcoming_shows_count\": 0,\n# }\n# data2={\n# \"id\": 5,\n# \"name\": \"Matt Quevedo\",\n# \"genres\": [\"Jazz\"],\n# \"city\": \"New York\",\n# \"state\": \"NY\",\n# \"phone\": \"300-400-5000\",\n# \"facebook_link\": \"https://www.facebook.com/mattquevedo923251523\",\n# \"seeking_venue\": False,\n# \"image_link\": \"https://images.unsplash.com/photo-1495223153807-b916f75de8c5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=334&q=80\",\n# \"past_shows\": [{\n# \"venue_id\": 3,\n# \"venue_name\": \"Park Square Live Music & Coffee\",\n# \"venue_image_link\": \"https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80\",\n# \"start_time\": \"2019-06-15T23:00:00.000Z\"\n# }],\n# \"upcoming_shows\": [],\n# \"past_shows_count\": 1,\n# 
\"upcoming_shows_count\": 0,\n# }\n# data3={\n# \"id\": 6,\n# \"name\": \"The Wild Sax Band\",\n# \"genres\": [\"Jazz\", \"Classical\"],\n# \"city\": \"San Francisco\",\n# \"state\": \"CA\",\n# \"phone\": \"432-325-5432\",\n# \"seeking_venue\": False,\n# \"image_link\": \"https://images.unsplash.com/photo-1558369981-f9ca78462e61?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=794&q=80\",\n# \"past_shows\": [],\n# \"upcoming_shows\": [{\n# \"venue_id\": 3,\n# \"venue_name\": \"Park Square Live Music & Coffee\",\n# \"venue_image_link\": \"https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80\",\n# \"start_time\": \"2035-04-01T20:00:00.000Z\"\n# }, {\n# \"venue_id\": 3,\n# \"venue_name\": \"Park Square Live Music & Coffee\",\n# \"venue_image_link\": \"https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80\",\n# \"start_time\": \"2035-04-08T20:00:00.000Z\"\n# }, {\n# \"venue_id\": 3,\n# \"venue_name\": \"Park Square Live Music & Coffee\",\n# \"venue_image_link\": \"https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80\",\n# \"start_time\": \"2035-04-15T20:00:00.000Z\"\n# }],\n# \"past_shows_count\": 0,\n# \"upcoming_shows_count\": 3,\n# }\n# data = list(filter(lambda d: d['id'] == artist_id, [data1, data2, data3]))[0]\n# return render_template('pages/show_artist.html', artist=data)\n\n# Update\n# ----------------------------------------------------------------\n@app.route('/artists//edit', methods=['GET'])\ndef edit_artist(artist_id):\n form = ArtistForm(request.form)\n artist = Artist.query.filter_by(id=artist_id).first_or_404()\n return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n# @app.route('/artists//edit', methods=['GET'])\n# def edit_artist(artist_id):\n# form = ArtistForm()\n# artist={\n# \"id\": 4,\n# \"name\": \"Guns N Petals\",\n# \"genres\": [\"Rock n Roll\"],\n# \"city\": \"San Francisco\",\n# \"state\": \"CA\",\n# \"phone\": \"326-123-5000\",\n# \"website\": \"https://www.gunsnpetalsband.com\",\n# \"facebook_link\": \"https://www.facebook.com/GunsNPetals\",\n# \"seeking_venue\": True,\n# \"seeking_description\": \"Looking for shows to perform at in the San Francisco Bay Area!\",\n# \"image_link\": \"https://images.unsplash.com/photo-1549213783-8284d0336c4f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=300&q=80\"\n# }\n# # TODO: populate form with fields from artist with ID \n# return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@app.route('/artists//edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n artist = Artist.query.filter_by(id=artist_id).first_or_404()\n form = ArtistForm(request.form)\n if form.validate():\n try:\n artist.name = form.name.data\n artist.city = form.city.data\n artist.state = form.state.data\n artist.phone = form.phone.data\n artist.genres = form.genres.choices\n artist.facebook_link = form.facebook_link.data\n artist.image_link = form.image_link.data\n artist.website = form.website.data\n artist.seeking_venue = form.seeking_venue.data\n artist.seeking_description = form.seeking_description.data\n db.session.commit()\n flash('Artist ' + artist.name + ' was successfully edited!')\n except ValueError:\n db.session.rollback()\n flash('Error! 
Artist ' + artist.name + ' could not be listed.')\n else:\n message = []\n for field, errors in form.errors.items():\n message.append(form[field].label + ', '.join(errors))\n flash('Errors: ' + '|'.join(message))\n return redirect(url_for('show_artist', artist_id=artist_id))\n\n# @app.route('/artists//edit', methods=['POST'])\n# def edit_artist_submission(artist_id):\n# # TODO: take values from the form submitted, and update existing\n# # artist record with ID using the new attributes\n\n# return redirect(url_for('show_artist', artist_id=artist_id))\n\n@app.route('/venues//edit', methods=['GET'])\ndef edit_venue(venue_id):\n form = VenueForm(request.form)\n venue = Venue.query.filter_by(id=venue_id).first_or_404()\n return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n# @app.route('/venues//edit', methods=['GET'])\n# def edit_venue(venue_id):\n# form = VenueForm()\n# venue={\n# \"id\": 1,\n# \"name\": \"The Musical Hop\",\n# \"genres\": [\"Jazz\", \"Reggae\", \"Swing\", \"Classical\", \"Folk\"],\n# \"address\": \"1015 Folsom Street\",\n# \"city\": \"San Francisco\",\n# \"state\": \"CA\",\n# \"phone\": \"123-123-1234\",\n# \"website\": \"https://www.themusicalhop.com\",\n# \"facebook_link\": \"https://www.facebook.com/TheMusicalHop\",\n# \"seeking_talent\": True,\n# \"seeking_description\": \"We are on the lookout for a local artist to play every two weeks. Please call us.\",\n# \"image_link\": \"https://images.unsplash.com/photo-1543900694-133f37abaaa5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=400&q=60\"\n# }\n# # TODO: populate form with values from venue with ID \n# return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@app.route('/artists//edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n venue = Venue.query.filter_by(id=venue_id).first_or_404()\n form = VenueForm(request.form)\n if form.validate():\n try:\n venue.name = form.name.data\n venue.city = form.city.data\n venue.state = form.state.data\n venue.phone = form.phone.data\n venue.genres = form.genres.choices\n venue.facebook_link = form.facebook_link.data\n venue.image_link = form.image_link.data\n venue.website = form.website.data\n venue.seeking_talent = form.seeking_talent.data\n venue.seeking_description = form.seeking_description.data\n db.session.commit()\n flash('Venue ' + venue.name + ' was successfully edited!')\n except ValueError:\n db.session.rollback()\n flash('Error! 
Venue ' + venue.name + ' could not be listed.')\n else:\n message = []\n for field, errors in form.errors.items():\n message.append(form[field].label + ', '.join(errors))\n flash('Errors: ' + '|'.join(message))\n return redirect(url_for('show_venue', venue_id=venue_id))\n\n# @app.route('/venues//edit', methods=['POST'])\n# def edit_venue_submission(venue_id):\n# # TODO: take values from the form submitted, and update existing\n# # venue record with ID using the new attributes\n# return redirect(url_for('show_venue', venue_id=venue_id))\n\n# Create Artist\n# ----------------------------------------------------------------\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm(request.form)\n return render_template('forms/new_artist.html', form=form)\n\n# @app.route('/artists/create', methods=['POST'])\n# def create_artist_submission():\n# # called upon submitting the new artist listing form\n# # TODO: insert form data as a new Venue record in the db, instead\n# # TODO: modify data to be the data object returned from db insertion\n\n# # on successful db insert, flash success\n# flash('Artist ' + request.form['name'] + ' was successfully listed!')\n# # TODO: on unsuccessful db insert, flash an error instead.\n# # e.g., flash('An error occurred. Artist ' + data.name + ' could not be listed.')\n# return render_template('pages/home.html')\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n error = False\n data = request.form\n aname = data['name']\n acity = data['city']\n astate = data['state']\n aphone = data['phone']\n agenres = request.form.getlist('genres')\n afb_link = data['facebook_link']\n aimage_link = data['image_link']\n awebsite = data['website']\n if data['seeking_venue'] == 'True':\n aseeking_venue = True\n else:\n aseeking_venue = False\n aseeking_description = data['seeking_description']\n try:\n db.session.add(Artist(\n city=acity,\n state=astate,\n name=aname,\n phone=aphone,\n facebook_link=afb_link,\n genres=agenres,\n seeking_venue=aseeking_venue,\n seeking_description=aseeking_description,\n website=awebsite,\n image_link=aimage_link\n ))\n except:\n error = True\n finally:\n if not error:\n db.session.commit()\n flash('Artist ' + request.form['name'] +\n ' was successfully listed!')\n else:\n flash('An error occurred. Artist ' +\n aname + ' could not be listed.')\n db.session.rollback()\n return render_template('pages/home.html')\n\n# @app.route('/artists/create', methods=['POST'])\n# def create_artist_submission():\n# error = False\n# try:\n# newArtist=request.form.get('form', '')\n# db.session.add(newArtist)\n# db.session.commit()\n# except:\n# error = True\n# db.session.rollback()\n# print(sys.exc_info())\n# finally:\n# db.session.close()\n# if error:\n# flash('An error occurred. 
Artist ' + request.form['name'] + ' could not be listed.')\n# else:\n# flash('Artist ' + request.form['name'] + ' was successfully listed!')\n# return render_template('pages/home.html')\n\n# Shows\n# ----------------------------------------------------------------\n@app.route('/shows')\ndef shows():\n result = []\n shows = Show.query.join(Venue, Show.venue_id == Venue.id).join(Artist, Artist.id == Show.artist_id).all()\n for show in shows:\n showObj = {\"venue_id\": show.venue_id,\n \"venue_name\": show.venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": show.artist.name,\n \"artist_image_link\": show.artist.image_link,\n \"start_time\": str(show.start_time)\n }\n result.append(showObj)\n return render_template('pages/shows.html', shows=result)\n\n# @app.route('/shows')\n# def shows():\n# # displays list of shows at /shows\n# # TODO: replace with real venues data.\n# # num_shows should be aggregated based on number of upcoming shows per venue.\n# data=[{\n# \"venue_id\": 1,\n# \"venue_name\": \"The Musical Hop\",\n# \"artist_id\": 4,\n# \"artist_name\": \"Guns N Petals\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1549213783-8284d0336c4f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=300&q=80\",\n# \"start_time\": \"2019-05-21T21:30:00.000Z\"\n# }, {\n# \"venue_id\": 3,\n# \"venue_name\": \"Park Square Live Music & Coffee\",\n# \"artist_id\": 5,\n# \"artist_name\": \"Matt Quevedo\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1495223153807-b916f75de8c5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=334&q=80\",\n# \"start_time\": \"2019-06-15T23:00:00.000Z\"\n# }, {\n# \"venue_id\": 3,\n# \"venue_name\": \"Park Square Live Music & Coffee\",\n# \"artist_id\": 6,\n# \"artist_name\": \"The Wild Sax Band\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1558369981-f9ca78462e61?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=794&q=80\",\n# \"start_time\": \"2035-04-01T20:00:00.000Z\"\n# }, {\n# \"venue_id\": 3,\n# \"venue_name\": \"Park Square Live Music & Coffee\",\n# \"artist_id\": 6,\n# \"artist_name\": \"The Wild Sax Band\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1558369981-f9ca78462e61?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=794&q=80\",\n# \"start_time\": \"2035-04-08T20:00:00.000Z\"\n# }, {\n# \"venue_id\": 3,\n# \"venue_name\": \"Park Square Live Music & Coffee\",\n# \"artist_id\": 6,\n# \"artist_name\": \"The Wild Sax Band\",\n# \"artist_image_link\": \"https://images.unsplash.com/photo-1558369981-f9ca78462e61?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=794&q=80\",\n# \"start_time\": \"2035-04-15T20:00:00.000Z\"\n# }]\n# return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. 
do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n# @app.route('/shows/create', methods=['POST'])\n# def create_show_submission():\n# # called to create new shows in the db, upon submitting new show listing form\n# # TODO: [COMPLETED] insert form data as a new Show record in the db, instead\n# error = False\n# date_format = '%Y-%m-%d %H:%M:%S'\n# try:\n# show = Show()\n# show.artist_id = request.form['artist_id']\n# show.venue_id = request.form['venue_id']\n# show.start_time = datetime.strptime(request.form['start_time'], date_format)\n# db.session.add(show)\n# db.session.commit()\n# except Exception as e:\n# error = True\n# print(f'Error ==> {e}')\n# db.session.rollback()\n# finally:\n# db.session.close()\n# if error: flash('An error occurred. Show could not be listed.')\n# else: flash('Show was successfully listed!')\n# return render_template('pages/home.html')\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n error = False\n data = request.form\n sstart_time = str(data['start_time'])\n sartist_id = data['artist_id']\n svenue_id = data['venue_id']\n try:\n newShow = Show(\n artist_id = sartist_id,\n venue_id = svenue_id,\n start_time = sstart_time\n )\n db.session.add(newShow)\n db.session.commit()\n except:\n error = True\n finally:\n db.session.close()\n if not error:\n db.session.commit()\n flash('Show was successfully listed!')\n else:\n flash('An error occurred. Show could not be listed.')\n db.session.rollback()\n return render_template('pages/home.html')\n\n\n# @app.route('/shows/create', methods=['POST'])\n# def create_show_submission():\n# # called to create new shows in the db, upon submitting new show listing form\n# # TODO: insert form data as a new Show record in the db, instead\n\n# # on successful db insert, flash success\n# flash('Show was successfully listed!')\n# # TODO: on unsuccessful db insert, flash an error instead.\n# # e.g., flash('An error occurred. 
Show could not be listed.')\n# # see: http://flask.pocoo.org/docs/1.0/patterns/flashing/\n# return render_template('pages/home.html')\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n","sub_path":"projects/01_fyyur/starter_code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":37977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"503166409","text":"from subprocess import Popen, PIPE, STDOUT\nimport re\n\ndef mp3skull(usrSearch, db, browser, songNum):\n\tbrowser.open(db)\n\tsearchForm = browser.get_form(action='/search_db.php')\n\tsearchForm['q'].value = usrSearch\n\tbrowser.submit_form(searchForm)\n\n\tsongNames = browser.select('b')\n\tsongName = re.sub('[\\s]', '', str(songNames[songNum]))\n\n\tsongLinks = browser.select('.show1')\n\tlink = re.search('(http).+(.mp3)', str(songLinks)).group(0)\n\n\treturn (link, songName)\n\ndef soundowl(usrSearch, db, browser, songNum):\n\tbrowser.open('%ssearch?q=%s' % (db, usrSearch))\n\thtml = str(browser.parsed)\n\tsongNames = browser.select('a.internal')\n\n\tsongTitle = re.search('(?:internal\">).+?(?=
)', str(songNames[songNum + 7]))\n\tsongArtist = re.search('(?:internal\">).+?(?=)', str(songNames[songNum + 6]))\n\n\tsongName = '%s - %s' % (songArtist[9:], songTitle[9:])\n\n\tsongLinks = browser.select('a')\n\tbrowser.follow_link(songLinks[9])\n\tlink = re.search('(http).+(.mp3)', str(browser.parsed)).group(0)\n\n\treturn (link, songName)\n\ndef grooveshark(usrSearch, db, browser, songNum):\n\tslave = Popen(['ruby', 'grooveshark.rb'], stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n\t\n\tcode = \"\"\"\n\trequire 'grooveshark'\n\tclient = Grooveshark::Client.new\n\tsession = client.session\n\n\tsongs = client.search_songs({search})\n\tsong = songs[{num}]\n\turl = client.get_song_url(song)\n\tputs(url)\n\n\tSTDOUT.flush\n\t\"\"\".format(search=usrSearch, num=songNum)\n\n\tslave.stdin.write(code)\n\n\twhile True:\n\t\tline = slave.stdout.readline().rstrip()\n\t\tif line == '[end]':\n\t\t\tbreak\n","sub_path":"databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"184670260","text":"from typing import Tuple\n\nimport tensorflow as tf\n\nfrom layers.basic_lstm_cell_with_dropout import (\n basic_lstm_cell_with_dropout,\n)\n\n\ndef DynamicBiDirLSTM(\n input_: tf.Tensor,\n seqlen: tf.Tensor,\n input_dropout: float,\n state_size: int,\n embedding_table: tf.Tensor, # np.ndarray,\n dtype: tf.DType = tf.float32,\n ) -> Tuple[tf.Tensor, tf.nn.rnn_cell.LSTMStateTuple]:\n\n batch_size = tf.shape(input_)[0]\n\n input_with_embedding = tf.nn.embedding_lookup(\n params=embedding_table,\n ids=input_,\n )\n\n input_with_dropout = tf.nn.dropout(\n x=input_with_embedding,\n keep_prob=(1.0 - input_dropout),\n )\n\n with tf.variable_scope('forward'):\n fw_lstm_cell, fw_init_state = basic_lstm_cell_with_dropout(\n state_size=state_size,\n batch_size=batch_size,\n state_dropout=0.0,\n output_dropout=0.0,\n dtype=dtype,\n )\n\n with tf.variable_scope('backward'):\n bw_lstm_cell, bw_init_state = basic_lstm_cell_with_dropout(\n state_size=state_size,\n batch_size=batch_size,\n state_dropout=0.0,\n output_dropout=0.0,\n dtype=dtype,\n )\n\n with tf.variable_scope('bidir_lstm'):\n _, output_state = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=fw_lstm_cell,\n cell_bw=bw_lstm_cell,\n inputs=input_with_dropout,\n sequence_length=seqlen,\n initial_state_fw=fw_init_state,\n initial_state_bw=bw_init_state,\n dtype=dtype,\n )\n concated_h = tf.concat(\n values=[output_state[0].h, output_state[1].h],\n axis=1,\n name='concated_h',\n )\n concated_c = tf.concat(\n values=[output_state[0].c, output_state[1].c],\n axis=1,\n name='concated_c',\n )\n concated_output_state = tf.contrib.rnn.LSTMStateTuple(\n *(concated_h, concated_c),\n )\n latent = tf.identity(concated_h, name='latent_vector')\n return latent, concated_output_state\n","sub_path":"text_autoencoder/encoders/ae/dynamic_bidir_lstm.py","file_name":"dynamic_bidir_lstm.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"510089584","text":"\n\ndb.get_to_db('alist','Forrest Weinberg')\n\nimport zlib\nfrom PIL import Image\nfrom door_lock_db import DoorLockDB\ndb = DoorLockDB()\nfrom base64 import b64encode, b64decode\nimport sys\nimport json\n\nimage = open(\"Professional Headshot (Cropped).jpg\", 'rb')\nread_image = image.read()\nimage_hex = read_image.hex()\nimage_json = json.dumps(image_hex)\ndb.upload_headshot_image('m', image_json)\n\nim = 
Image.open(\"Professional Headshot (Cropped).jpg\")\n\nimg_bytes = im.tobytes()\n\nresponse = db.upload_headshot_image('--m', img_bytes)\n\nb64_image = b64encode(im.tobytes())\nb64_str_image = str(b64_image)\nb64_json = json.dumps(b64_str_image)\n(width, height) = (200, 200)\n\n# im_resized = im.resize((width, height))\nim.thumbnail(200)\n\nim.save('im_resized.jpg', quality=95, optimize=True)\n\nresized_im = Image.open('im_resized.jpg')\nb64_resized_image = b64encode(resized_im.tobytes())\nb64_str_resized_image = str(b64_resized_image)\n\nprint(f'Here is a comparison of the the size:\\nOriginal: {sys.getsizeof(b64_str_image)}\\nNew File: {sys.getsizeof(b64_str_resized_image)}')\n\nwith open('Professional Headshot (Cropped).jpg', 'rb') as image:\n im = image.read()\n\ncompressed_image = zlib.compress(im, 9)\n\nb64_image = b64encode(im2.tobytes())\nb64_str = str(b64_image)\nb64_json = json.dumps(str(_))\n\nim.save('working_image.png', format='PNG')\n\ndb.post_new_doc('https://doorlock-be53.restdb.io/media', b64_json)\ndb.post_new_doc('https://doorlock-be53.restdb.io/media', b64_str)\n\n\nimport requests\nlatest_file = './image.jpeg'\nheaders = {'x-apikey': '{MY_API_KEY_HERE}'}\nurl = \"https://{MYDATABASE_NAME_HERE}.restdb.io/media\"\nfiles = {'file': open(latest_file, 'rb')}\nr = requests.post(url, files=files, headers=headers)\nprint(r.status_code, r.text)\n\n\nsys.getsizeof(im_base64)\n\n'5d6701b79ce4772e00006715'\n\ndb.update_doc('alist','5d6701b79ce4772e00006715', {'image':'5d66dbd49ce4772e000063b9'})\ndb.get_to_db('alist')","sub_path":"misc details.py","file_name":"misc details.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"271877552","text":"#!/usr/bin/env python\n\"\"\"Process Huang's wireframe dataset for L-CNN network\nUsage:\n dataset/wireframe.py \n dataset/wireframe.py (-h | --help )\nExamples:\n python3 dataset/wireframe.py /datadir/wireframe data/wireframe\nArguments:\n Original data directory\n Directory of the output\nOptions:\n -h --help Show this screen.\n\"\"\"\n\nimport os\nimport xml.etree.ElementTree as ET\nimport sys\nimport json\nfrom itertools import combinations\nimport glob\nfrom skimage import io\nimport copy\nimport cv2\nimport re\nimport numpy as np\nimport skimage.draw\nimport matplotlib.pyplot as plt\nfrom docopt import docopt\nfrom scipy.ndimage import zoom\n\ntry:\n sys.path.append(\".\")\n sys.path.append(\"..\")\n from lcnn.utils import parmap\nexcept Exception:\n raise\n\n\ndef inrange(v, shape):\n return 0 <= v[0] < shape[0] and 0 <= v[1] < shape[1]\n\n\ndef to_int(x):\n return tuple(map(int, x))\n\n\ndef save_heatmap(prefix, image, input_annotation):\n annotation = copy.deepcopy(input_annotation)\n im_rescale = (512, 512)\n heatmap_scale = (128, 128)\n fx, fy = heatmap_scale[0] / image.shape[0], heatmap_scale[1] / image.shape[1]\n center = np.zeros((1,) + heatmap_scale, dtype=np.float32) # [1,128,128]\n corner = np.zeros((1,) + heatmap_scale, dtype=np.float32) # [1,128,128]\n corner_offset = np.zeros((1, 2) + heatmap_scale, dtype=np.float32) # [1,2,128,128]\n corner_bin_offset=np.zeros((1, 2) + heatmap_scale, dtype=np.float32) # [1,2,128,128]\n\n for i in annotation:\n annotation[i]=[[j[0]*fx,j[1]*fy] for j in annotation[i]] #[[np.clip(j[0]*fx,0,heatmap_scale[0] - 1e-4),np.clip(j[1] * fy, 0, heatmap_scale[1] - 1e-4)] for j in annotation[i]]\n center_on_heatmap=[i[0] * fx,i[1] * fy]\n if 0<=int(center_on_heatmap[0])\"]\n data_output = args[\"\"]\n\n 
os.makedirs(data_output, exist_ok=True)\n for batch in [\"train\", \"valid\"]:\n filelist = glob.glob(f\"{data_root}/{batch}/*.xml\")\n filelist.sort()\n def handle(xmlname):\n iname = xmlname.replace(\"xml\", \"jpg\")\n image = io.imread(iname).astype(np.float32)[:, :, :3]\n image_size = image.shape\n prefix = xmlname.split(\".\")[-2].split('/')[-1]\n os.makedirs(os.path.join(data_output, batch), exist_ok=True)\n path = os.path.join(data_output, batch, prefix)\n try:\n tree = ET.parse(xmlname)\n root = tree.getroot()\n except:\n with open(xmlname) as f:\n xml=f.read()\n root = ET.fromstring(\"\" + xml + \"\")\n annotation={}\n for child_of_root in root.iter(tag='gate_corners'):\n corners = []\n tmp=child_of_root.find('top_left').text.split(',')\n assert len(tmp)==2\n tmp=[image_size[0]-float(tmp[1]),float(tmp[0])]\n if image_size[0]>tmp[0]>=0 and image_size[1]>tmp[1]>=0:\n corners.append(tmp)\n\n tmp = child_of_root.find('top_right').text.split(',')\n assert len(tmp) == 2\n tmp = [image_size[0] - float(tmp[1]), float(tmp[0])]\n if image_size[0]>tmp[0]>=0 and image_size[1]>tmp[1]>=0:\n corners.append(tmp)\n\n tmp = child_of_root.find('bottom_right').text.split(',')\n assert len(tmp) == 2\n tmp = [image_size[0] - float(tmp[1]), float(tmp[0])]\n if image_size[0]>tmp[0]>=0 and image_size[1]>tmp[1]>=0:\n corners.append(tmp)\n\n tmp = child_of_root.find('bottom_left').text.split(',')\n assert len(tmp) == 2\n tmp = [image_size[0] - float(tmp[1]), float(tmp[0])]\n if image_size[0]>tmp[0]>=0 and image_size[1]>tmp[1]>=0:\n corners.append(tmp)\n\n tmp = child_of_root.find('center').text.split(',')\n assert len(tmp) == 2\n tmp = [image_size[0] - float(tmp[1]), float(tmp[0])]\n annotation[tuple(tmp)]=corners\n\n save_heatmap(f\"{path}_0\", image[::, ::], annotation)\n if batch != \"valid\":\n annotation1={}\n for i in annotation:\n annotation1[i[0],image_size[1]-i[1]]=[[j[0],image_size[1]-j[1]] for j in annotation[i]]\n if not save_heatmap(f\"{path}_1\", image[::, ::-1], annotation1):\n return\n\n annotation2={}\n for i in annotation:\n annotation2[image_size[0]-i[0], i[1]]=[[image_size[0]-j[0], j[1]] for j in annotation[i]]\n if not save_heatmap(f\"{path}_2\", image[::-1, ::], annotation2):\n return\n\n annotation3 ={}\n for i in annotation:\n annotation3[image_size[0]-i[0], image_size[1]-i[1]]=[[image_size[0]-j[0], image_size[1]-j[1]] for j in annotation[i]]\n if not save_heatmap(f\"{path}_3\", image[::-1, ::-1], annotation3):\n return\n print(\"Finishing\", os.path.join(data_output, batch, prefix))\n\n parmap(handle, filelist, 1)\n\nif __name__ == \"__main__\":\n main()","sub_path":"Backups/lcnn/dataset/wireframe.py","file_name":"wireframe.py","file_ext":"py","file_size_in_byte":6489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"487706904","text":"# encoding=utf-8\n\nfrom twisted.internet import reactor, protocol\n\nHOST = 'localhost'\nPORT = 12345\n\n\nclass TSClientProtocol(protocol.Protocol):\n def dataReceived(self, data):\n print(data.decode())\n self.sendData()\n\n def connectionMade(self):\n self.sendData()\n\n def sendData(self):\n data = input('> ')\n if data:\n print('sending data %s' % data)\n self.transport.write(data.encode())\n else:\n self.transport.loseConnection()\n\n\nclass TSClientFactory(protocol.ClientFactory):\n protocol = TSClientProtocol\n clientConnectionLost = clientConnectionFailed = \\\n lambda self, connector, reason: reactor.stop()\n\n\nreactor.connectTCP(HOST, PORT, 
TSClientFactory())\nreactor.run()\n","sub_path":"chapter-02/twistedTcpClient.py","file_name":"twistedTcpClient.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"502482110","text":"# ==============================================================================\n# original program\nprint('original program')\nx = int(input('Enter the first number: '))\ny = int(input('Enter the second number: '))\nprint('first number / second number = ', x / y)\n# Output:\n# Enter the first number: 5\n# Enter the second number: 0\n# Traceback (most recent call last):\n# File \"./tmp.py\", line 4, in \n# print('first number / second number = ', x / y)\n# ZeroDivisionError: division by zero\n\n# ==============================================================================\n# advoid error and catch exception\nprint('advoid error and catch exception')\ntry:\n x = int(input('Enter the first number: '))\n y = int(input('Enter the second number: '))\n print('first number / second number = ', x / y)\nexcept ZeroDivisionError:\n print(\"The second number can't be zero\")\n# Output:\n# Enter the first number: 5\n# Enter the second number: 0\n# The second number can't be zero\n\n# ==============================================================================\n# raise no arguments\nprint('raise no arguments')\n\n\nclass MuffledCalculator(object):\n '''\n Muffled Calculator\n '''\n muffle = False\n\n def calc(self, expr):\n '''\n Calculation function\n '''\n try:\n print(expr, ' = ', eval(expr))\n except ZeroDivisionError:\n if self.muffle:\n print('Division by zero is illegal')\n else:\n raise # no argument\n\n\n# testing\ncalculator = MuffledCalculator()\ncalculator.calc('10/2')\n# Output: 10/2 = 5.0\n\ncalculator.calc('10/0')\n# Output:\n# Traceback (most recent call last):\n# File \"./tmp.py\", line 18, in \n# calculator.calc('10/0')\n# File \"./tmp.py\", line 7, in calc\n# return eval(expr)\n# File \"\", line 1, in \n# ZeroDivisionError: division by zero\n# => raise with no argument\n\ncalculator.muffle = True # set muffle flag\ncalculator.calc('10/0')\n# Output: Division by zero is illegal\n\n# ==============================================================================\n# more than one except clause\nprint('more than one except clause')\ntry:\n x = int(input('Enter the first number: '))\n y = int(input('Enter the second number: '))\n print('first number / second number = ', x / y)\nexcept ZeroDivisionError:\n print(\"The second number can't be zero\")\nexcept TypeError:\n print(\"That wasn't a number, was it?\")\nexcept ValueError:\n print('Invalid literal')\n\n# Output:\n# Enter the first number: 10\n# Enter the second number: 0\n# The second number can't be zero\n# Output:\n# Enter the first number: abc\n# Invalid literal\n\n# ==============================================================================\n# catching two exceptions\nprint('catching two exceptions')\ntry:\n x = int(input('Enter the first number: '))\n y = int(input('Enter the second number: '))\n print('first number / second number = ', x / y)\nexcept (ZeroDivisionError, TypeError, ValueError):\n print(\"Your numbers were bogus... 
\")\n\n# Output:\n# Enter the first number: 10\n# Enter the second number: 0\n# Your numbers were bogus...\n\n# ==============================================================================\n# catching the object\nprint('catching the object')\ntry:\n x = int(input('Enter the first number: '))\n y = int(input('Enter the second number: '))\n print('first number / second number = ', x / y)\nexcept (ZeroDivisionError, TypeError, ValueError) as e:\n print(e)\n\n# Output:\n# Enter the first number: 10\n# Enter the second number: 0\n# division by zero\n\n# Enter the first number: abc\n# invalid literal for int() with base 10: 'abc'\n\n# ==============================================================================\n# real catchall\nprint('real catchall')\ntry:\n x = int(input('Enter the first number: '))\n y = int(input('Enter the second number: '))\n print('first number / second number = ', x / y)\nexcept:\n print('something wrong... ')\n\n# Output:\n# Enter the first number: 10\n# Enter the second number: 0\n# something wrong...\n\n# Enter the first number:\n# ^C\n# something wrong...\n\n# ==============================================================================\n# when all is well\nprint('when all is well')\nwhile True:\n try:\n x = int(input('Enter the first number: '))\n y = int(input('Enter the second number: '))\n print('first number / second number = ', x / y)\n except (ZeroDivisionError, TypeError, ValueError) as e:\n print('Invalid input. Please try again.')\n else:\n break\n\n# Output:\n# Enter the first number: abc\n# Invalid input. Please try again.\n# Enter the first number: 10\n# Enter the second number: 0\n# Invalid input. Please try again.\n# Enter the first number: 10\n# Enter the second number: 2\n# first number / second number = 5.0\n\n# ==============================================================================\n# finally\nprint('finally')\nx = None\ntry:\n x = 1 / 0\nexcept:\n print('Unknown variable')\nelse:\n print('That went well')\nfinally:\n print('Cleaning up...')\n del x\n","sub_path":"beginning-python/08/08-01-exception-catch.py","file_name":"08-01-exception-catch.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"95522183","text":"import torch\nimport torch.nn as nn\nfrom torchvision import models\nimport torch.nn.functional as F\n\nclass VGG19(nn.Module):\n def __init__(self):\n super(VGG19, self).__init__()\n source_model = models.vgg19(pretrained=True).features\n\n # replace in-place relus\n for name, layer in source_model.named_children():\n if isinstance(layer, nn.ReLU):\n setattr(source_model, name, nn.ReLU(inplace=False))\n # if isinstance(layer, nn.MaxPool2d): # did not give good results\n # setattr(source_model, name, nn.AvgPool2d(\n # kernel_size=2,\n # stride=2,\n # padding=0)\n # )\n\n # get the feature layers\n features = list(source_model)\n # set to eval mode\n self.features = nn.ModuleList(features)\n # freeze layers\n for parameter in self.features.parameters():\n parameter.requires_grad = False\n\n def forward(self, x):\n results = []\n needed_layers = {1, 6, 11, 20, 29, 31}\n for ii, model in enumerate(self.features):\n x = model(x)\n if ii in needed_layers:\n results.append(x)\n # (style_layers, content_layers)\n return results[:-1], list(results[-1])\n\n# content loss\ndef get_content_loss(base_content, target):\n return F.mse_loss(base_content, target)\n\ndef gram_matrix(input):\n a, b, c, d = input.size() # a=batch size(=1)\n # b=number 
of feature maps\n # (c,d)=dimensions of a f. map (N=c*d)\n\n features = input.view(a * b, c * d) # resise F_XL into \\hat F_XL\n\n G = torch.mm(features, features.t()) # compute the gram product\n\n # we 'normalize' the values of the gram matrix\n # by dividing by the number of element in each feature maps.\n return G.div(a * b * c * d)\n\ndef get_style_loss(base_style, gram_target):\n G = gram_matrix(base_style)\n loss = F.mse_loss(G, gram_target)\n return loss\n\nif __name__ == \"__main__\":\n vgg_model = VGG19()\n","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"588085091","text":"from django.http import JsonResponse\nfrom django.views.generic import View\nfrom django.db import connection, DatabaseError\nfrom django.shortcuts import render_to_response\n\nfrom myBlog.core.models import Post\n\n\ndef status(request):\n try:\n cursor = connection.cursor()\n cursor.execute(\"SELECT 1\")\n cursor.fetchone()\n cursor.close()\n database_status = 'OK'\n except DatabaseError:\n database_status = 'UNAVAILABLE'\n\n data = {\n 'database_status': database_status,\n }\n return JsonResponse(data)\n\n\nclass PostView(View):\n def get(self, request, post_id):\n post = Post.objects.get(id=post_id)\n\n context = {'post': post}\n return render_to_response('post.html', context)\n","sub_path":"myBlog/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"444049870","text":"\"\"\"\nThese tests cover the Hacker News homepage\n\"\"\"\n\nimport pytest\nfrom delayed_assert import expect, assert_expectations\nfrom basetest import HomeTest, LoggedInTest\nfrom pages.home import HomePage\nfrom pages.nav import NavBar\n\n\nclass TestLoggedoutHomepage(HomeTest):\n\n\n def test_HomepageLoadsLoggedOut(self):\n nav = NavBar(self.driver)\n home_page = HomePage(self.driver)\n expect(nav.verify_menu_header_displayed())\n expect(home_page.verify_posts_displayed())\n assert_expectations()\n\n\nclass TestLoggedinHomepage(LoggedInTest):\n\n def test_HomepageLoadsLoggedIn(self):\n nav = NavBar(self.driver)\n home_page = HomePage(self.driver)\n expect(nav.verify_username_displayed())\n expect(nav.verify_menu_header_displayed())\n expect(home_page.verify_posts_displayed())\n assert_expectations()","sub_path":"tests/test_home.py","file_name":"test_home.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"366790264","text":"from model import *\nimport numpy as np\nimport keras\nfrom keras.callbacks import ModelCheckpoint\nimport random\nimport os\nimport cv2 as cv\nfrom imutils import paths\nfrom keras.preprocessing.image import img_to_array\nimport matplotlib.pyplot as plt\n\nnorm_size=256\ndef load_Train_data(imagefolder,labelfolder):\n print(\"[INFO] loading images...\")\n imagedata = []\n labeldata = []\n # grab the image paths and randomly shuffle them\n imagePaths = os.listdir(imagefolder)\n\n random.seed(42)\n random.shuffle(imagePaths)\n # loop over the input images\n for imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n # print(imagePath)\n image = cv.imread(os.path.join(imagefolder,imagePath))[:,:,0]\n image = cv.resize(image, (norm_size, norm_size))\n image = img_to_array(image)\n imagedata.append(image)\n\n label 
= cv.imread(os.path.join(labelfolder, imagePath))[:, :, 0]\n label = cv.resize(label, (norm_size, norm_size))\n label = img_to_array(label)\n labeldata.append(label)\n\n # standardize the raw pixel intensities with the dataset mean and std\n\n imagedata = np.array(imagedata, dtype=np.float32)\n avg = np.average(imagedata)\n std = np.std(imagedata)\n out = open(\"config\", 'w')\n out.write('{},{}\\n'.format(avg, std))\n out.close()\n print(avg, std)\n imagedata = (imagedata - avg) / std\n\n labeldata = np.array(labeldata, dtype=np.float32)\n labeldata[labeldata < 1] = 0\n labeldata[labeldata >= 1] = 1\n # labeldata[labeldata>1]=1\n\n return imagedata, labeldata\n\nif __name__ == \"__main__\":\n train = 1\n\n trainX, trainY = load_Train_data(os.path.join('D:/images', 'balanceimage'), os.path.join('D:/images', 'balancelabel'))\n print(trainX.shape)\n print(trainY.shape)\n print(np.sum(trainY == 1))\n print(np.sum(trainY == 0))\n print(np.sum(trainY == 0) / (np.sum(trainY == 0) + np.sum(trainY == 1)))\n\n # cv.imshow(\"main\", trainY[1, :, :, 0])\n # cv.waitKey(3000)\n\n if train:\n EPOCHS = 120\n model = unet()\n model.summary()\n # model = keras.models.load_model(\"best.hdf5\", custom_objects={'bce_dice_loss': bce_dice_loss})\n model_checkpoint = ModelCheckpoint('best.hdf5', monitor='val_acc', verbose=1, save_best_only=True)\n tb_cb = keras.callbacks.TensorBoard(log_dir=\"./log\", write_images=1, histogram_freq=0)\n H = model.fit(trainX, trainY, batch_size=4, epochs=EPOCHS, validation_split=0.04, callbacks=[model_checkpoint, tb_cb])\n model.save(\"./lasted.hdf5\")\n\n plt.style.use(\"ggplot\")\n plt.figure()\n N = EPOCHS\n plt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, N), H.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, N), H.history[\"acc\"], label=\"train_acc\")\n plt.plot(np.arange(0, N), H.history[\"val_acc\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy on foot classifier\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend(loc=\"lower left\")\n plt.savefig(\"train.png\")\n\n else:\n model = keras.models.load_model(\"best.hdf5\", custom_objects={'bce_dice_loss': bce_dice_loss})\n scores = model.evaluate(trainX, trainY, verbose=0)\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n img = np.round(model.predict(trainX[0:1, :, :, 0:1]))\n img = img.astype(np.uint8)\n print(np.sum(img > 0))\n img[img > 0] = 255\n cv.imshow(\"main\", img[0, :, :, 0])\n cv.waitKey(5000)\n","sub_path":"python/TDB_Unet/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"142505994","text":"import smtplib\n\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\n\nfrom app_celery.celery import app\n\n\n@app.task\ndef send_email(subject, from_email, to_email, template, args):\n\n html_message = render_to_string(template, args)\n try:\n send_mail(\n subject=subject,\n message=\"\",\n from_email=from_email,\n recipient_list=[to_email],\n html_message=html_message,\n )\n except smtplib.SMTPException:\n print(f\"Error while sending email to {to_email}\")\n","sub_path":"api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"603915574","text":"import re\nimport requests\nimport bs4\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport numpy\n\ndef getHTMLText(url):\n try:\n r = 
requests.get(url, timeout=30)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except Exception:\n return \"\"\n\n\ndef fillUnivList(ulist, html):\n soup = BeautifulSoup(html, 'html.parser')\n for tr in soup.find('tbody').children:\n if isinstance(tr, bs4.element.Tag):\n tds = tr('td')\n ulist.append([tds[0].string, tds[1].string, tds[11].string])\n return ulist\n\ndef printUnivList(ulist, num):\n tplt = '{0:^10}\\t{1:{3}^10}\\t{2:^10}'\n print(tplt.format(\"排名\", '学校', '科研经费', chr(12288)))\n for i in range(num):\n u = ulist[i]\n print(tplt.format(u[0], u[1], u[2], chr(12288)))\n\n\nif __name__ == '__main__':\n uinfo = []\n url = 'http://www.zuihaodaxue.com/zuihaodaxuepaiming2019.html'\n html = getHTMLText(url)\n flist = fillUnivList(uinfo, html)\n nn = numpy.array(flist)\n x = nn[:, 1]\n y = nn[:, 2].astype(float) # int() cannot convert a whole column; cast elementwise instead\n # plt.plot(y)\n # plt.show()\n\n\n","sub_path":"爬虫学习/Rank.py","file_name":"Rank.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"526100624","text":"# MIT License\n#\n# Copyright (c) 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport os\nimport sys\n\nfrom slim.datasets import dataset_utils\nfrom slim.nets import nets_factory as network_factory\nfrom slim.preprocessing import preprocessing_factory\n\nslim = tf.contrib.slim\n\n\nclass SoftmaxClassifier(object):\n\n __model_name_map = {\n 'alexnet_v2': 'alexnet_v2',\n 'cifarnet': 'CifarNet',\n 'overfeat': 'overfeat',\n 'vgg_a': 'vgg_a',\n 'vgg_16': 'vgg_16',\n 'vgg_19': 'vgg_19',\n 'inception_v1': 'InceptionV1',\n 'inception_v2': 'InceptionV2',\n 'inception_v3': 'InceptionV3',\n 'inception_v4': 'InceptionV4',\n 'inception_resnet_v2': 'InceptionResnetV2',\n 'lenet': 'LeNet',\n 'resnet_v1_50': 'resnet_v1_50',\n 'resnet_v1_101': 'resnet_v1_101',\n 'resnet_v1_152': 'resnet_v1_152',\n 'resnet_v1_200': 'resnet_v1_200',\n 'resnet_v2_50': 'resnet_v2_50',\n 'resnet_v2_101': 'resnet_v2_101',\n 'resnet_v2_152': 'resnet_v2_152',\n 'resnet_v2_200': 'resnet_v2_200',\n 'mobilenet_v1': 'MobilenetV1',\n 'mobilenet_v1_075': 'MobilenetV1',\n 'mobilenet_v1_050': 'MobilenetV1',\n 'mobilenet_v1_025': 'MobilenetV1',\n 'nasnet_cifar': 'nasnet_cifar',\n 'nasnet_mobile': 'nasnet_mobile',\n 'nasnet_large': 'nasnet_large',\n }\n\n def __init__(self):\n\n self._model_name = None\n self._model_path = None\n\n self._has_dataset = False\n self._has_model = False\n\n self._number_of_classes = 0\n self._labels_to_names = []\n self._network_image_size = 0\n\n self._graph = None\n self._session = None\n\n self._input_image_tensor = None\n self._normalized_features_tensor = None\n self._probability_tensor = None\n\n self._feature_key = 'Features'\n\n def network_image_size(self):\n return (self._network_image_size)\n\n def _load_dataset(self, dataset_dir):\n self._has_dataset = False\n\n self._labels_to_names = dataset_utils.read_label_file(dataset_dir)\n self._number_of_classes = len(self._labels_to_names)\n\n if (self._number_of_classes > 0):\n self._has_dataset = True\n\n return (self._has_dataset)\n\n def _load_model(self, model_path, model_name, gpu_memory_fraction):\n self._has_model = False\n\n if tf.gfile.IsDirectory(model_path):\n self._model_path = tf.train.latest_checkpoint(model_path)\n else:\n self._model_path = model_path\n\n self._graph = tf.Graph()\n with self._graph.as_default():\n\n image_preprocessing_fn = preprocessing_factory.get_preprocessing(\n model_name, is_training=False)\n network_fn = network_factory.get_network_fn(\n model_name,\n num_classes=self._number_of_classes,\n is_training=False)\n self._network_image_size = network_fn.default_image_size\n\n self._input_image_tensor = tf.placeholder(tf.uint8,\n (None, None, 3), 'input')\n processed_image = image_preprocessing_fn(self._input_image_tensor,\n self._network_image_size,\n self._network_image_size)\n processed_image_tensor = tf.expand_dims(processed_image, 0)\n\n logits_tensor, end_points = network_fn(processed_image_tensor)\n\n if (self._feature_key in end_points):\n features_tensor = end_points[self._feature_key]\n self._normalized_features_tensor = tf.nn.l2_normalize(\n features_tensor, dim=1)\n #self._normalized_features_tensor = features_tensor\n\n self._probability_tensor = tf.nn.softmax(logits_tensor)\n\n 
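# cap this process's share of GPU memory so several sessions can coexist on one device\n 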
gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=gpu_memory_fraction)\n self._session = tf.Session(\n config=tf.ConfigProto(\n gpu_options=gpu_options, log_device_placement=False))\n\n init_fn = slim.assign_from_checkpoint_fn(\n self._model_path,\n slim.get_model_variables(\n SoftmaxClassifier.__model_name_map[model_name]))\n init_fn(self._session)\n\n self._has_model = True\n self._model_name = model_name\n\n return (self._has_model)\n\n def load(self, model_path, model_name, gpu_memory_fraction):\n\n if (not self._load_dataset(model_path)):\n return (False)\n\n if (not self._load_model(model_path, model_name, gpu_memory_fraction)):\n return (False)\n\n return (True)\n\n def classify(self, input_image, use_top=5, print_results=False):\n\n class_names_probabilities = []\n\n class_probabilities = []\n try:\n feed_dict = {self._input_image_tensor: input_image}\n class_probabilities = self._session.run(\n self._probability_tensor, feed_dict=feed_dict)\n except (IOError, ValueError, IndexError) as error:\n return (class_names_probabilities)\n\n if (len(class_probabilities) == 0):\n return (class_names_probabilities)\n\n class_probabilities = class_probabilities[0, 0:]\n sorted_indices = [\n i[0] for i in sorted(\n enumerate(-class_probabilities), key=lambda x: x[1])\n ]\n\n for index in range(use_top):\n class_name = self._labels_to_names[sorted_indices[index]]\n class_probability = class_probabilities[sorted_indices[index]]\n class_names_probabilities.append([class_name, class_probability])\n if (print_results):\n print(\"Class is - \" + class_name + \" with probability - \" +\n str(class_probability))\n\n return (class_names_probabilities)\n\n def features(self, input_image):\n normalized_features = []\n try:\n feed_dict = {self._input_image_tensor: input_image}\n normalized_features = self._session.run(\n self._normalized_features_tensor, feed_dict=feed_dict)\n except (IOError, ValueError, IndexError) as error:\n normalized_features = []\n return (normalized_features)\n\n #print('normalized_features', normalized_features, len(normalized_features[0]))\n return (normalized_features)\n","sub_path":"tfface/classifier/SoftmaxClassifier.py","file_name":"SoftmaxClassifier.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"139346065","text":"# -*- encoding: utf-8 -*-\n'''\n@File : test5.py\n@Time : 2020/05/06 20:08:14\n@Author : zjm \n@Version : 3.7.6\n@Contact : 1005564803@qq.com\n@WebSite : https://github.com/sum-123/Python-.git\n'''\n# Create a message-board table with four fields (id, topic, poster, post time); use English column names;\n# implement insert, delete, update and query operations on the table;\n# use the PyMySQL driver\n\n# here put the import lib\nimport pymysql\ndb = pymysql.connect(\"localhost\", \"root\", \"qazwsx123\", \"test\")\n \n# create a cursor object with the cursor() method\ncursor = db.cursor()\ncursor.execute(\"DROP TABLE IF EXISTS MESSAGEBOARD\")\n# ********** create the table **********\ncreate = \"\"\"CREATE TABLE MESSAGEBOARD (\n ID CHAR(20) NOT NULL,\n THEME CHAR(20),\n NAME CHAR(20),\n MESSAGE_TIME DATE \n )\"\"\"\n \ncursor.execute(create)\n# ********** insert a row *********\ninsert = \"\"\"INSERT INTO MESSAGEBOARD(ID,\n THEME, NAME, MESSAGE_TIME)\n VALUES ('1201810801', 'Python','Jack', '2020-05-06')\"\"\"\ntry:\n cursor.execute(insert)\n db.commit()\nexcept:\n db.rollback()\n# *********** query records *********\nsearch = \"SELECT * FROM MESSAGEBOARD\"\ntry:\n # execute the SQL statement\n cursor.execute(search)\n # fetch all rows\n results = cursor.fetchall()\n for row in results:\n id = row[0]\n theme = row[1]\n name = row[2]\n time = row[3]\n # 
print the fetched row\n print (\"id = %s,theme = %s,name = %s,time = %s\" % \\\n (id, theme, name, time))\nexcept:\n print (\"Error: unable to fetch data\")\n# *********** update a record ***********\nupgrade = \"UPDATE MESSAGEBOARD SET THEME = 'python123'\"\ntry:\n # execute the SQL statement\n cursor.execute(upgrade)\n # commit the change to the database\n db.commit()\nexcept:\n # roll back on error\n db.rollback()\n# ********* delete a record ************\n# pass the value as a query parameter so the driver quotes it correctly\ndelete = \"DELETE FROM MESSAGEBOARD WHERE THEME > %s\"\ntry:\n # execute the SQL statement\n cursor.execute(delete, ('python123',))\n # commit the change\n db.commit()\nexcept:\n # roll back on error\n db.rollback()\n\n# close the database connection\ndb.close()\n","sub_path":"ClassTest/May06/MessageBoard.py","file_name":"MessageBoard.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"221746934","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('usersprofile/', views.users_profile, name='users_profile'),\n path('profileaccount/', views.profile_account, name='profile_account'),\n path('profilesubscription/', views.profile_subscription, name='profile_subscription'),\n path('userlogout/', views.user_logout, name=\"user_logout\"),\n path('leavefeedbacks', views.leave_feedbacks, name='leave_feedbacks'),\n]\n","sub_path":"usersprofile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"560278411","text":"import os\r\nimport tempfile\r\n\r\nfrom core.models import Ingredient, Recipe, Tag\r\nfrom django.contrib.auth import get_user_model\r\nfrom django.test import TestCase\r\nfrom django.urls import reverse\r\nfrom exercise.serializers import RecipeDetailSerializer, RecipeSerializer\r\nfrom PIL import Image\r\nfrom rest_framework import status\r\nfrom rest_framework.test import APIClient\r\n\r\nRECIPE_URL = reverse('exercise:recipe-list')\r\n\r\n# /api/recipe/recipes\r\n# /api/recipe/recipes/1/\r\n\r\n\r\ndef image_upload_url(recipe_id):\r\n \"\"\"Return url for recipe image upload\r\n \"\"\"\r\n return reverse('exercise:recipe-upload-image', args=[recipe_id])\r\n\r\n\r\ndef detail_url(recipe_id):\r\n \"\"\"Return recipe detail URL\r\n \"\"\"\r\n return reverse('exercise:recipe-detail', args=[recipe_id])\r\n\r\n\r\ndef sample_recipe(user, **params):\r\n \"\"\"Create and return a sample recipe\r\n \"\"\"\r\n defaults = {\r\n 'title': 'Sample recipe',\r\n 'time_minutes': 10,\r\n 'price': 5.00,\r\n }\r\n defaults.update(params) # create or update existing\r\n # fields in a dictionary\r\n return Recipe.objects.create(user=user, **defaults)\r\n\r\n\r\ndef sample_tag(user, name='Main Tag'):\r\n return Tag.objects.create(user=user, name=name)\r\n\r\n\r\ndef sample_ingredient(user, name='Some Ingredient'):\r\n return Ingredient.objects.create(user=user, name=name)\r\n\r\n\r\nclass PublicRecipeApiTests(TestCase):\r\n \"\"\"Test unauthenticated recipe API access\r\n \"\"\"\r\n\r\n def setUp(self):\r\n self.client = APIClient()\r\n\r\n def test_auth_required(self):\r\n res = self.client.get(RECIPE_URL)\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\r\n\r\n\r\nclass PrivateRecipeApiTests(TestCase):\r\n\r\n def setUp(self):\r\n self.client = APIClient()\r\n self.user = get_user_model().objects.create_user(\r\n 'test@test.com',\r\n '123123'\r\n )\r\n self.client.force_authenticate(self.user)\r\n\r\n def test_retrieve_recipes(self):\r\n \"\"\"Test retrieving list of recipes\r\n \"\"\"\r\n sample_recipe(user=self.user)\r\n 
sample_recipe(user=self.user)\r\n\r\n res = self.client.get(RECIPE_URL)\r\n recipes = Recipe.objects.all().order_by('-id')\r\n serializer = RecipeSerializer(recipes, many=True)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)\r\n self.assertEqual(res.data, serializer.data)\r\n\r\n def test_recipes_limited_to_user(self):\r\n \"\"\"Test retrieving recipes for user\r\n \"\"\"\r\n user2 = get_user_model().objects.create_user(\r\n 'sadf@sadfa.com',\r\n 'asdfasdf'\r\n )\r\n sample_recipe(user=user2)\r\n sample_recipe(user=self.user)\r\n\r\n res = self.client.get(RECIPE_URL)\r\n recipes = Recipe.objects.filter(user=self.user)\r\n serializer = RecipeSerializer(recipes, many=True)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)\r\n self.assertEqual(len(res.data), 1)\r\n self.assertEqual(res.data, serializer.data)\r\n\r\n def test_view_recipe_detail(self):\r\n \"\"\"Test viewing a recipe detail\r\n \"\"\"\r\n recipe = sample_recipe(user=self.user)\r\n # adding a tag to the current recipe\r\n recipe.tags.add(sample_tag(user=self.user))\r\n recipe.ingredients.add(sample_ingredient(user=self.user))\r\n\r\n url = detail_url(recipe.id)\r\n res = self.client.get(url)\r\n serializer = RecipeDetailSerializer(recipe)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)\r\n self.assertEqual(serializer.data, res.data)\r\n\r\n def test_create_basic_recipe(self):\r\n \"\"\"Test creating recipe\r\n \"\"\"\r\n payload = {\r\n \"title\": \"Salad\",\r\n \"time_minutes\": 10,\r\n \"price\": 5.00\r\n }\r\n res = self.client.post(RECIPE_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\r\n recipe = Recipe.objects.get(id=res.data['id'])\r\n # assert each posted field directly against the stored recipe\r\n for k in payload.keys():\r\n self.assertEqual(payload[k], getattr(recipe, k))\r\n\r\n def test_create_recipe_with_tags(self):\r\n \"\"\"Test creating a recipe with tags\r\n \"\"\"\r\n tag1 = sample_tag(user=self.user, name='Vegan')\r\n tag2 = sample_tag(user=self.user, name='Dessert')\r\n payload = {\r\n 'title': 'Avocado line',\r\n 'tags': [tag1.id, tag2.id],\r\n 'time_minutes': 50,\r\n 'price': 20.00\r\n }\r\n res = self.client.post(RECIPE_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\r\n recipe = Recipe.objects.get(id=res.data['id'])\r\n tags = recipe.tags.all() # retrieve all the tags not the ids\r\n self.assertEqual(tags.count(), 2)\r\n self.assertIn(tag1, tags) # useful to check list,querysets\r\n self.assertIn(tag2, tags)\r\n\r\n def test_create_recipe_with_ingredients(self):\r\n \"\"\"Test creating a recipe with ingredients\r\n \"\"\"\r\n ing1 = sample_ingredient(user=self.user, name='Salt')\r\n ing2 = sample_ingredient(user=self.user, name='Tomato')\r\n\r\n payload = {\r\n 'title': 'Salad',\r\n 'time_minutes': 10,\r\n 'price': 10.00,\r\n 'ingredients': [ing1.id, ing2.id]\r\n }\r\n res = self.client.post(RECIPE_URL, payload)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\r\n recipe = Recipe.objects.get(id=res.data['id'])\r\n\r\n ingredients = recipe.ingredients.all()\r\n self.assertEqual(ingredients.count(), 2)\r\n\r\n self.assertIn(ing1, ingredients)\r\n self.assertIn(ing2, ingredients)\r\n\r\n def test_partial_update_recipe(self):\r\n \"\"\"Test updating a recipe with patch\r\n \"\"\"\r\n recipe = sample_recipe(user=self.user)\r\n recipe.tags.add(sample_tag(user=self.user))\r\n new_tag = sample_tag(user=self.user, name='Curry')\r\n\r\n payload = {\r\n 'title': 'Chicken',\r\n 'tags': [new_tag.id]\r\n }\r\n 
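# PATCH is a partial update: fields omitted from the payload keep their stored values\r\n 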
self.client.patch(detail_url(recipe.id), payload)\r\n recipe.refresh_from_db() # fetches again\r\n\r\n self.assertEqual(recipe.title, payload['title'])\r\n tags = recipe.tags.all()\r\n self.assertEqual(len(tags), 1)\r\n self.assertIn(new_tag, tags)\r\n\r\n def test_full_update_recipe(self):\r\n \"\"\"Teste updating a recipe with put\r\n \"\"\"\r\n recipe = sample_recipe(user=self.user)\r\n recipe.tags.add(sample_tag(user=self.user))\r\n payload = {\r\n \"title\": \"Spaghetti Carbonara\",\r\n 'time_minutes': 25,\r\n 'price': 5.00\r\n }\r\n url = detail_url(recipe.id)\r\n self.client.put(url, payload)\r\n recipe.refresh_from_db()\r\n self.assertEqual(recipe.title, payload['title'])\r\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\r\n self.assertEqual(recipe.price, payload['price'])\r\n self.assertEqual(recipe.tags.count(), 0)\r\n\r\n\r\nclass RecipeImageUploadTests(TestCase):\r\n\r\n def setUp(self):\r\n self.client = APIClient()\r\n self.user = get_user_model().objects.create_user(\r\n 'uset@sdfg.com',\r\n 'sdfasd'\r\n )\r\n self.client.force_authenticate(self.user)\r\n self.recipe = sample_recipe(user=self.user)\r\n\r\n def tearDown(self):\r\n self.recipe.image.delete() # delete the image created from the tests\r\n\r\n def test_upload_image(self):\r\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\r\n # creates a temporary file and stores in the os\r\n # after end this it removes the file\r\n img = Image.new('RGB', (10, 10))\r\n img.save(ntf, format='JPEG')\r\n ntf.seek(0)\r\n res = self.client.post(image_upload_url(self.recipe.id), {\r\n 'image': ntf,\r\n 'format': 'multipart'\r\n # insted of the default format JSON\r\n # we specify the right format for the image\r\n })\r\n\r\n self.recipe.refresh_from_db()\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)\r\n self.assertIn('image', res.data)\r\n # verify if the path exists in the os\r\n self.assertTrue(os.path.exists(self.recipe.image.path))\r\n\r\n def test_upload_image_bad_request(self):\r\n \"\"\"Test uploading an invalid image\r\n \"\"\"\r\n url = image_upload_url(self.recipe.id)\r\n res = self.client.post(url, {\r\n 'image': 'not an image',\r\n })\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\r\n\r\n def test_filter_recipe_by_tags(self):\r\n \"\"\"Test returning recipes with specific tags\r\n \"\"\"\r\n recipe1 = sample_recipe(user=self.user, title='Chicken')\r\n recipe2 = sample_recipe(user=self.user, title='Salad')\r\n tag1 = sample_tag(user=self.user, name='Meat')\r\n tag2 = sample_tag(user=self.user, name='Vegetarian')\r\n recipe1.tags.add(tag1)\r\n recipe2.tags.add(tag2)\r\n recipe3 = sample_recipe(user=self.user, title='Fish')\r\n\r\n res = self.client.get(\r\n RECIPE_URL, {\"tags\": f'{tag1.id},{tag2.id}'}\r\n )\r\n serializer1 = RecipeSerializer(recipe1)\r\n serializer2 = RecipeSerializer(recipe2)\r\n serializer3 = RecipeSerializer(recipe3)\r\n\r\n self.assertIn(serializer1.data, res.data)\r\n self.assertIn(serializer2.data, res.data)\r\n self.assertNotIn(serializer3.data, res.data)\r\n\r\n def test_filter_recipes_filter_by_ingredient(self):\r\n \"\"\"Test returning recipes with specific ingredients\r\n \"\"\"\r\n recipe1 = sample_recipe(user=self.user, title='Chicken')\r\n recipe2 = sample_recipe(user=self.user, title='Salad')\r\n ingredient1 = sample_ingredient(user=self.user, name='Tomato')\r\n ingredient2 = sample_ingredient(user=self.user, name='Lettuce')\r\n recipe1.ingredients.add(ingredient1)\r\n recipe2.ingredients.add(ingredient2)\r\n recipe3 = 
sample_recipe(user=self.user, title='Fish')\r\n\r\n res = self.client.get(\r\n RECIPE_URL, {\"ingredients\": f'{ingredient1.id},{ingredient2.id}'}\r\n )\r\n serializer1 = RecipeSerializer(recipe1)\r\n serializer2 = RecipeSerializer(recipe2)\r\n serializer3 = RecipeSerializer(recipe3)\r\n\r\n self.assertIn(serializer1.data, res.data)\r\n self.assertIn(serializer2.data, res.data)\r\n self.assertNotIn(serializer3.data, res.data)\r\n","sub_path":"app/exercise/tests/test_recipe_api.py","file_name":"test_recipe_api.py","file_ext":"py","file_size_in_byte":10513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"413612026","text":"\"\"\"\n1024. Video Stitching\n\n# You are given a series of video clips from a sporting event that lasted T seconds.\n# These video clips can be overlapping with each other and have varied lengths.\n\n# Each video clip clips[i] is an interval: it starts at time clips[i][0] and ends at time clips[i][1].\n# We can cut these clips into segments freely: for example, a clip [0, 7] can be cut into segments [0, 1] + [1, 3] + [3, 7].\n\n# Return the minimum number of clips needed so that we can cut the clips into segments that cover the entire sporting event ([0, T]).\n# If the task is impossible, return -1.\n\n\n# Example 1:\n\n# Input: clips = [[0,2],[4,6],[8,10],[1,9],[1,5],[5,9]], T = 10\n# Output: 3\n# Explanation:\n# We take the clips [0,2], [8,10], [1,9]; a total of 3 clips.\n# Then, we can reconstruct the sporting event as follows:\n# We cut [1,9] into segments [1,2] + [2,8] + [8,9].\n# Now we have segments [0,2] + [2,8] + [8,10] which cover the sporting event [0, 10].\n\n# Example 2:\n\n# Input: clips = [[0,1],[1,2]], T = 5\n# Output: -1\n# Explanation:\n# We can't cover [0,5] with only [0,1] and [0,2].\n\n# Example 3:\n\n# Input: clips = [[0,1],[6,8],[0,2],[5,6],[0,4],[0,3],[6,7],[1,3],[4,7],[1,4],[2,5],[2,6],[3,4],[4,5],[5,7],[6,9]], T = 9\n# Output: 3\n# Explanation:\n# We can take clips [0,4], [4,7], and [6,9].\n\n# Example 4:\n\n# Input: clips = [[0,4],[2,8]], T = 5\n# Output: 2\n# Explanation:\n# Notice you can have extra video after the event ends.\n\"\"\"\n\n\nclass VideoStitching:\n\n def doit_greedy(self, clips, T):\n\n clips.sort(key=lambda x: (x[0], -x[1]))\n right, i, count = 0, 0, 0\n\n while right < T:\n\n nextright = right\n while i < len(clips) and clips[i][0] <= right:\n nextright = max(nextright, clips[i][1])\n i += 1\n\n count += 1\n\n if right == nextright:\n return -1\n\n right = nextright\n\n return count\n\n \"\"\"\n Solution 2: Sort + DP\n Sort clips first.\n Then for each clip, update dp[clip[0]] ~ dp[clip[1]].\n\n Time O(NlogN + NT), Space O(T)\n\n \"\"\"\n def doit_dp(self, clips, T):\n \"\"\"\n :param clips:\n :param T:\n :return:\n \"\"\"\n clips.sort(key=lambda x: x[0])\n dp = [float('inf') for _ in range(T+1)]\n dp[0] = 0\n\n for c in clips:\n for i in range(c[0], min(T+1, c[1]+1)):\n dp[i] = min(dp[i], dp[c[0]] + 1)\n\n return -1 if dp[-1] == float('inf') else dp[-1]\n\n def doit_greedy_sort_best(self, clips, T):\n #\n end, end2, res = -1, 0, 0\n\n for i, j in sorted(clips):\n if end2 >= T or i > end2:\n break\n elif end < i <= end2:\n res, end = res + 1, end2\n end2 = max(end2, j)\n return res if end2 >= T else -1\n\n # O(n*log(n))\n def doit_greedy(self, clips, T):\n\n # Greedy\n # 类似的题目都是左端点排序,右端点比大小\n cur_end, aim_end, ans = -1, 0, 0\n clips.sort(key=lambda x: x[0])\n for s, e in clips:\n if aim_end >= T or s > aim_end:\n break\n elif cur_end < s <= aim_end:\n ans += 1\n cur_end 
= aim_end\n aim_end = max(aim_end, e)\n\n return ans if aim_end >= T else -1\n\n def doit_sort(self, clips, T):\n\n clips.sort(key=lambda x: (x[0], -x[1]))\n\n ans = []\n for c in clips:\n\n if not ans:\n if c[0] != 0:\n return -1\n ans.append(c)\n\n elif c[0] > ans[-1][1]:\n return -1\n elif c[1] <= ans[-1][1]:\n continue\n elif c[0] <= ans[-1][0] and c[1] > ans[-1][1]:\n t = ans.pop()\n ans.append((t[0], c[1]))\n else:\n ans.append((ans[-1][1], c[1]))\n\n if ans and ans[-1][1] >= T:\n return len(ans)\n\n return -1\n\n '''\n First, sort the clips based on the starting time.\n Apply DP to solve this problem\n DP[limit] = Cnt means we can use minimal Cnt clips to cover 0 to limit, inclusive.\n Initially, DP[0] = 0\n When a clip C comes, iterate limits between C[0] and min(T, C[1]), inclusive.\n We can merge interval in this region\n for Limit in range(c[0], newLimit+1):\n DP[newLimit] = min(DP[newLimit], DP[Limit] + 1)\n When last clip is checked, iterate DP list again to find the smallest C that can cover T.\n Time: O(T*n+nlogn), n is the length of clips\n Space: O(T)\n '''\n\n def doit(self, clips, T):\n DP = [0] + [float('inf')] * T\n clips.sort()\n for c in clips:\n if c[0] > T:\n break\n newLimit = min(T, c[1])\n for Limit in range(c[0], newLimit+1):\n DP[newLimit] = min(DP[newLimit], DP[Limit] + 1)\n\n return -1 if DP[-1] == float('inf') else DP[-1]\n\n\nif __name__ == '__main__':\n\n res = VideoStitching().doit(clips=[[0, 2], [4, 6], [8, 10], [1, 9], [1, 5], [5, 9]], T=10)\n\n res = VideoStitching().doit(clips=[[0, 1], [1, 2]], T=5)\n\n res = VideoStitching().doit(clips=[[0, 1], [6, 8], [0, 2], [5, 6], [0, 4], [0, 3], [6, 7], [1, 3], [4, 7], [1, 4], [2, 5], [2, 6], [3, 4], [4, 5], [5, 7], [6, 9]], T=9)\n\n res = VideoStitching().doit(clips=[[0, 4], [2, 8]], T=5)\n\n res = VideoStitching().doit([[5, 7], [1, 8], [0, 0], [2, 3], [4, 5], [0, 6], [5, 10], [7, 10]], 5)","sub_path":"PythonLeetcode/leetcodeM/1024_VideoStitching.py","file_name":"1024_VideoStitching.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"78252349","text":"\"\"\" MINE algorithm core part \"\"\"\n\nimport sys\nimport torch\nimport numpy as np\n\nclass MINE:\n \"\"\" Mutual Information Neural Estimator class \"\"\"\n def __init__(self,\n statistics_network,\n criterion,\n ema_decay):\n \"\"\" Initialize MINE object\n\n Args:\n statistics_network (nn.Module): neural network f(x, z) -> R\n ema_decay (float): decay rate for exponential moving average\n \"\"\"\n assert criterion in ['mine-d', 'mine-f']\n self.statistics_network = statistics_network\n self.ema_decay = ema_decay\n self.criterion = criterion\n self.ema_denominator = None\n self.random_state = np.random.RandomState(0)\n\n def estimate_on_batch(self, x, z):\n \"\"\" Estimate mutual information and return loss function on mini-batch of samples \"\"\"\n self.statistics_network.train()\n\n rand_indices = np.arange(z.size(0))\n self.random_state.shuffle(rand_indices)\n z_marg = z[rand_indices]\n\n T_joint = self.statistics_network(x, z)\n\n T_margin = self.statistics_network(x, z_marg)\n\n if self.criterion == 'mine-d':\n denominator = torch.mean(torch.exp(T_margin))\n\n # Correct biased gradient\n if self.ema_denominator is None:\n self.ema_denominator = denominator\n else:\n self.ema_denominator = ((1.0 - self.ema_decay) * denominator +\n self.ema_decay * self.ema_denominator).detach()\n\n mean_joint = torch.mean(T_joint)\n eMI = (mean_joint - 
torch.log(denominator))\n loss = -(mean_joint -\n denominator / self.ema_denominator)\n else:\n eMI = (torch.mean(T_joint) -\n torch.mean(torch.exp(T_margin - 1.0)))\n loss = -eMI\n\n return eMI, loss\n\n def estimate_on_dataset(self, loader):\n \"\"\" Estimate mutual information between two distributions\n\n Args:\n loader (DataLoader): Dataloader loads\n mini-batch samples (x, z) from joint distribution p(x, z)\n \"\"\"\n self.statistics_network.eval()\n iterator_joint = iter(loader)\n iterator_marginal = iter(loader)\n\n num_samples = 0.0\n term1, term2 = 0.0, 0.0\n\n try:\n while True:\n x, z = next(iterator_joint)\n _, z_marginal = next(iterator_marginal)\n\n with torch.no_grad():\n statistics_joint = self.statistics_network(x, z)\n statistics_marginal = self.statistics_network(x, z_marginal)\n\n term1 += torch.sum(statistics_joint)\n term2 += torch.sum(torch.exp(statistics_marginal))\n num_samples += statistics_joint.size(0)\n except StopIteration:\n pass\n\n eMI = term1/num_samples - torch.log(term2/num_samples)\n return eMI\n\n","sub_path":"mine.py","file_name":"mine.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"94360916","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 18 18:09:33 2020\n\n@author: asif\n\"\"\"\nimport numpy as np\n\n\ndef Read_lc_qdp(qdp_file_name):\n with open(qdp_file_name, 'r') as reader:\n lines = reader.readlines()\n modes = []\n data = []\n b = iter(lines[9:])\n block = []\n for line in b:\n if line[0].isalpha():\n data.append(block)\n block = []\n line = next(b)\n modes.append(line.strip().split(' ')[1])\n line = next(b)\n line = next(b)\n block.append(line.strip().split('\\t'))\n # print(line)\n data.append(block)\n data.pop(0)\n return [modes, data]\n\n\ndef Calc_t_bin(data, r_bin=10., min_counts=100):\n \"\"\"Takes a light-curve array with time and count-rate columns. 
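Column 0 is assumed to hold the bin start time and column 3 the count rate, matching the indexing below. 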
Calculates time bins \n with uniform binning in log with minimum couts\"\"\"\n tstart=data[:,0]\n del_t=np.diff(tstart)\n cr=data[:,3]\n # cr_err=data[:,2]\n del_t=np.insert(del_t, 0, del_t[0])\n \n counts=cr*del_t\n \n t_bin=[0.0,r_bin]\n \n \n while tstart[-1]//t_bin[-1]!=0:\n t_bin.append(t_bin[-1]*r_bin)\n \n total_bins=len(t_bin)\n \n c_bin=[]\n \n for i in range(total_bins-1):\n mask=(tstart >= t_bin[i]) & (tstart<=t_bin[i+1])\n print(counts[mask])\n cum=np.sum(counts[mask])\n c_bin.append(cum)\n \n \n \n indices=[]\n for i in range(total_bins-1):\n if c_bin[i]= self.maxsize:\n raise Exception(\"full error\")\n node = Node(value=value)\n tailnode = self.tailnode() or self.root\n \n tailnode.next = node\n node.prev = tailnode\n node.next = self.root\n self.root.prev = node\n\n self.length += 1\n \n def appendleft(self, value):\n if self.maxsize is not None and self.length >= self.maxsize:\n raise Exception(\"full error\")\n node = Node(value=value)\n if self.root.next == self.root:# empty\n self.root.next = node\n self.root.prev = node\n node.next = self.root\n node.prev = self.root\n else:\n headnode = self.headnode()\n self.root.next = node\n node.prev = self.root\n node.next = headnode\n headnode.prev = node\n\n self.length += 1\n \n def remove(self, node):\n if node is self.root:\n return -1\n else:\n prevnode = node.prev\n nextnode = node.next\n prevnode.next = nextnode\n nextnode.prev = prevnode\n self.length -= 1\n \n def iter_node(self):\n if self.root.next is self.root:\n return\n curnode = self.root.next\n while curnode is not self.root:\n yield curnode\n curnode = curnode.next\n \n def __iter__(self):\n if self.root.next is self.root:\n return\n for node in self.iter_node():\n yield node.value\n \n def iter_node_reverse(self):\n if self.root.next is self.root:\n return\n curnode = self.root.prev\n while curnode is not self.root:\n yield curnode\n curnode = curnode.prev\n\n\ndef test_dll_append():\n dll = CircularDoubleLinkedList()\n dll.append(0)\n dll.append(1) \n assert dll.length == 2\n\ndef test_dll_appendleft():\n dll = CircularDoubleLinkedList()\n dll.append(0)\n dll.append(1)\n assert dll.length == 2\n\ndef test_dll_remove():\n dll = CircularDoubleLinkedList()\n dll.append(0)\n dll.append(1)\n node = dll.headnode()\n dll.remove(node)\n assert dll.length == 1\n\ndef test_dll_iter_node():\n dll = CircularDoubleLinkedList()\n dll.append(0)\n dll.append(1)\n dll.append(2)\n dll.append(3)\n dll.iter_node()\n\ndef test_dll_iter():\n dll = CircularDoubleLinkedList()\n dll.append(0)\n dll.append(1)\n dll.append(2)\n dll.append(3)\n for value in dll:\n print(value)\n\ndef test_dll_iter_node_reverse():\n dll = CircularDoubleLinkedList()\n dll.append(0)\n dll.append(1)\n dll.append(2)\n dll.append(3)\n dll.iter_node_reverse()\n\nif __name__ == \"__main__\":\n test_dll_append()\n test_dll_appendleft()\n test_dll_remove()\n test_dll_iter_node()\n test_dll_iter_node_reverse()\n test_dll_iter()\n\n\n\n \n \n\n","sub_path":"docs/03_链表/my_double_link_list.py","file_name":"my_double_link_list.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"557884430","text":"import cv2\nimport matplotlib.pyplot as plt\n\nBOX_COLOR = (255, 0, 0)\nTEXT_COLOR = (255, 255, 255)\n\ndef Visualize_bbox(img, bbox, class_name, color=BOX_COLOR, thickness=2):\n x_min, y_min, x_max, y_max = bbox\n\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n\n cv2.rectangle(img, (x_min, y_min), 
(x_max, y_max), color=color, thickness=thickness)\n ((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)\n cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), BOX_COLOR, -1)\n cv2.putText(\n img,\n text=class_name,\n org=(x_min, y_min - int(1.3 * text_height)),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.35,\n color=TEXT_COLOR,\n lineType=cv2.LINE_AA\n )\n return img\n\n\ndef Visualize(image, bboxes, category_ids, category_id_to_name, cropped, image_path, i, m, save_dir = None, color = BOX_COLOR, thickness = 2):\n img = image.copy()\n for bbox, category_id in zip(bboxes, category_ids):\n\n class_name = category_id_to_name.get(category_id)\n if cropped:\n plt.figure(figsize=(12, 12))\n plt.axis('off')\n plt.imshow(img)\n image_path = image_path.replace('.jpg', '')\n plt.savefig(save_dir + '/' + image_path + str(i) + 'crop-' + str(m) + '.jpg')\n else:\n img = Visualize_bbox(img, bbox, class_name, color, thickness)\n plt.figure(figsize=(12, 12))\n plt.axis('off')\n plt.imshow(img)\n plt.show()\n\n\nif __name__ == '__main__':\n print('Done!')\n","sub_path":"Visualize.py","file_name":"Visualize.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"511752341","text":"import torch\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport torch.utils.data\r\nfrom sklearn.datasets import load_digits\r\nfrom sklearn.decomposition import PCA # used by plot_PCA below\r\n\r\n\r\ndef contour_torch(xmin, xmax, ymin, ymax, M, ngrid = 33):\r\n \"\"\"\r\n make a contour plot without the magic\r\n\r\n Note- your network can be passed in as parameter M without any modification.\r\n @param xmin: lowest value of x in the plot\r\n @param xmax: highest value of x in the plot\r\n @param ymin: ditto for y\r\n @param ymax: ditto for y\r\n @param M: prediction function, takes a (X,Y,2) torch tensor as input and returns an (X,Y) torch tensor as output\r\n @param ngrid: number of grid points along each axis\r\n \"\"\"\r\n with torch.no_grad():\r\n (X,Y) = XOR_data()\r\n xgrid = torch.linspace(xmin, xmax, ngrid)\r\n ygrid = torch.linspace(ymin, ymax, ngrid)\r\n (xx, yy) = torch.meshgrid((xgrid, ygrid))\r\n D = torch.cat((xx.reshape(ngrid, ngrid, 1), yy.reshape(ngrid, ngrid, 1)), dim = 2)\r\n zz = M(D)[:,:,0]\r\n cs = plt.contour(xx.cpu().numpy(), yy.cpu().numpy(), zz.cpu().numpy(),\r\n cmap = 'RdYlBu')\r\n plt.clabel(cs)\r\n for i in range(Y.shape[0]):\r\n if Y[i] == 1:\r\n plt.plot(X[i,0],X[i,1],'ro')\r\n if Y[i] == 0:\r\n plt.plot(X[i,0],X[i,1],'bo')\r\n plt.show()\r\n\r\n\r\ndef torch_digits():\r\n \"\"\"\r\n Get the training and test datasets for your convolutional neural network\r\n @return train, val: two torch.utils.data.Datasets\r\n \"\"\"\r\n digits, labels = load_digits(return_X_y=True)\r\n digits = torch.tensor(np.reshape(digits, [-1, 8, 8]), dtype=torch.float)\r\n print(digits.shape)\r\n labels = torch.tensor(np.reshape(labels, [-1]), dtype=torch.long)\r\n val_X = digits[:180,:,:]\r\n val_Y = labels[:180]\r\n digits = digits[180:,:,:]\r\n labels = labels[180:]\r\n train = torch.utils.data.TensorDataset(digits, labels)\r\n val = torch.utils.data.TensorDataset(val_X, val_Y)\r\n return train, val\r\n\r\n\r\ndef XOR_data():\r\n X = torch.tensor([[-1., -1.], [1., -1.], [-1., 1.], [1., 1.]])\r\n Y = (-torch.prod(X, dim=1)+1.)/2 \r\n return X, Y.view(-1,1)\r\n\r\n\r\ndef plot_PCA(intermediate, labels):\r\n \"\"\"\r\n Create a scatterplot of intermediate \r\n @param intermediate: numpy NxD\r\n @param labels: numpy (N,)\r\n 
\"\"\"\r\n pca = PCA(2)\r\n ft = pca.fit_transform(intermediate)\r\n for i in range(10):\r\n plt.scatter(ft[labels==i,0], ft[labels==i, 1], label=str(i), alpha=0.4)\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\ndef get_image():\r\n \"\"\"\r\n @return img: (N, M, 3) image with values ranging from 0 to 1\r\n \"\"\"\r\n return plt.imread('LunarEclipseCologne_Junius_960.jpg')/255.0 # display image as a float to avoid overflows/underflows\r\n\r\n\r\ndef loss_batch(model, loss_func, xb, yb, opt=None):\r\n \"\"\" Compute the loss of the model on a batch of data, or do a step of optimization.\r\n\r\n @param model: the neural network\r\n @param loss_func: the loss function (can be applied to model(xb), yb)\r\n @param xb: a batch of the training data to input to the model\r\n @param yb: a batch of the training labels to input to the model\r\n @param opt: a torch.optimizer.Optimizer. If not None, use the Optimizer to improve the model. Otherwise, just compute the loss.\r\n @return a numpy array of the loss of the minibatch, and the length of the minibatch\r\n \"\"\"\r\n loss = loss_func(model(xb), yb)\r\n\r\n if opt is not None:\r\n loss.backward()\r\n opt.step()\r\n opt.zero_grad()\r\n\r\n return loss.item(), len(xb) \r\n","sub_path":"hw2/hw2_utils.py","file_name":"hw2_utils.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"97887740","text":"'''\n我们事先了一个telnet客户端的类TelnetClient,调用实例的start()方法\n启动客户端与服务器交互,交互完毕后需调用cleanup()方法,关闭已连接的socket\n以及将操作历史记录写入文件并关闭\n\n能否让telnetClient实例支持上下文管理协议,从而替代手工调用cleanup()方法\n'''\nfrom telnetlib import Telnet\nfrom sys import stdin,stdout\nfrom collections import deque\n\nclass TelnetClient():\n def __init__(self,addr,port=23):\n self.addr=addr\n self.port=port\n self.tn=None\n\n def start(self):\n self.tn=Telnet(self.addr,self.port)\n self.history=deque()\n\n\n\n #user\n t=self.tn.read_until(b\"login: \")\n stdout.write(t)\n user=stdin.readline()\n self.tn.write(user)\n\n #password\n t=self.tn.read_until(b\"Password: \")\n if t.startswith(user[:-1]): t= t[len(user) + 1:]\n stdout.write(t)\n self.tn.write(stdin.readline())\n\n t=self.tn.read_until(b'$ ')\n stdout.write(t)\n while True:\n uinput=stdin.readline()\n if not uinput:\n break\n self.history.append(uinput)\n self.tn.write(uinput)\n t=self.tn.read_until('$ ')\n stdout.write(t[len(uinput)+ 1:])\n\n def cleanup(self):\n self.tn.close()\n self.tn=None\n with open(self.addr+\"_history.txt\",\"w\") as f:\n f.writelines(self.history)\n\nclient=TelnetClient('192.168.0.1')\nprint(\"\\nstrat\")\nclient.start()\nprint(\"\\ncleanup\")\nclient.cleanup()","sub_path":"chapter_seven/如何让对象支持上下文管理???.py","file_name":"如何让对象支持上下文管理???.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"208957026","text":"def fileWriting(dataList):\n fileExt = 0\n fileName = \" \"\n fileNameFull = \" \"\n WRITE = \"w\"\n print(\"Please enter your file name\")\n fileName = input()\n print(\"Choose extension for your file\\n 1.txt\\n 2.csv\")\n fileExt = int(input(\"Please enter the digit: \"))\n while fileExt != 1 and fileExt != 2:\n fileExt = input(\"Please, choose ext. 
from the list and enter the digit:\")\n if fileExt == 1:\n fileNameFull = fileName + \".txt\"\n elif fileExt == 2:\n fileNameFull = fileName + \".csv\"\n print(\"File will be saved as \" + fileNameFull)\n with open(fileNameFull, mode = WRITE) as fileToWrite:\n for data in dataList:\n fileToWrite.write(data)\n fileToWrite.close()\n return\n\n#Для сбора информации от пользователя.\n#For input data from user.\ndef dataInput():\n data = \" \"\n dataList = [ ]\n dataListSepar = [ ]\n print(\"Please enter your data (when you will finish, write DONE):\")\n while data != \"DONE\":\n data = input()\n dataList.append(\"\\n\" + data)\n dataList.remove(\"\\nDONE\")\n## Печатает список в нормально виде.\n## dataListSepar = \"\\n\".join(dataList)\n## print(dataListSepar)\n return dataList\n\ndataList = dataInput()\nfileWriting(dataList)\n","sub_path":"funInputWrite.py","file_name":"funInputWrite.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"549303812","text":"\"\"\"Construction Schema\"\"\"\nfrom pydantic import Field, constr\nfrom typing import List, Union\n\nfrom ._base import IDdEnergyBaseModel\nfrom .material import EnergyMaterial, EnergyMaterialNoMass, \\\n EnergyWindowMaterialGas, EnergyWindowMaterialGasCustom, \\\n EnergyWindowMaterialGasMixture, EnergyWindowMaterialSimpleGlazSys, \\\n EnergyWindowMaterialBlind, EnergyWindowMaterialGlazing, EnergyWindowMaterialShade\nfrom .schedule import ScheduleRuleset, ScheduleFixedInterval\n\n\nclass WindowConstructionAbridged(IDdEnergyBaseModel):\n \"\"\"Construction for window objects (Aperture, Door).\"\"\"\n\n type: constr(regex='^WindowConstructionAbridged$') = 'WindowConstructionAbridged'\n\n layers: List[constr(min_length=1, max_length=100)] = Field(\n ...,\n description='List of strings for material identifiers. The order of the '\n 'materials is from exterior to interior.',\n min_items=1,\n max_items=8\n )\n\n\nclass WindowConstruction(WindowConstructionAbridged):\n \"\"\"Construction for window objects (Aperture, Door).\"\"\"\n\n type: constr(regex='^WindowConstruction$') = 'WindowConstruction'\n\n materials: List[\n Union[\n EnergyWindowMaterialGas, EnergyWindowMaterialGasCustom, EnergyWindowMaterialGasMixture,\n EnergyWindowMaterialSimpleGlazSys, EnergyWindowMaterialBlind,\n EnergyWindowMaterialGlazing, EnergyWindowMaterialShade\n ]\n ] = Field(\n ...,\n description='List of materials. The order of the materials is from outside '\n 'to inside.',\n min_items=1,\n max_items=8\n )\n\n\nclass OpaqueConstructionAbridged(IDdEnergyBaseModel):\n \"\"\"Construction for opaque objects (Face, Shade, Door).\"\"\"\n\n type: constr(regex='^OpaqueConstructionAbridged$') = 'OpaqueConstructionAbridged'\n\n layers: List[constr(min_length=1, max_length=100)] = Field(\n ...,\n description='List of strings for material identifiers. The order of the materials '\n 'is from exterior to interior.',\n min_items=1,\n max_items=10\n )\n\n\nclass OpaqueConstruction(OpaqueConstructionAbridged):\n \"\"\"Construction for opaque objects (Face, Shade, Door).\"\"\"\n\n type: constr(regex='^OpaqueConstruction$') = 'OpaqueConstruction'\n\n materials: List[Union[EnergyMaterial, EnergyMaterialNoMass]] = Field(\n ...,\n description='List of materials. 
The order of the materials is from outside to'\n ' inside.',\n min_items=1,\n max_items=10\n )\n\n\nclass ShadeConstruction(IDdEnergyBaseModel):\n \"\"\"Construction for Shade objects.\"\"\"\n\n type: constr(regex='^ShadeConstruction$') = 'ShadeConstruction'\n\n solar_reflectance: float = Field(\n 0.2,\n ge=0,\n le=1,\n description=' A number for the solar reflectance of the construction.'\n )\n\n visible_reflectance: float = Field(\n 0.2,\n ge=0,\n le=1,\n description=' A number for the visible reflectance of the construction.'\n )\n\n is_specular: bool = Field(\n default=False,\n description='Boolean to note whether the reflection off the shade is diffuse '\n '(False) or specular (True). Set to True if the construction is '\n 'representing a glass facade or a mirror material.'\n )\n\n\nclass AirBoundaryConstructionAbridged(IDdEnergyBaseModel):\n \"\"\"Construction for Air Boundary objects.\"\"\"\n\n type: constr(regex='^AirBoundaryConstructionAbridged$') = \\\n 'AirBoundaryConstructionAbridged'\n\n air_mixing_per_area: float = Field(\n 0.1,\n ge=0,\n description='A positive number for the amount of air mixing between Rooms '\n 'across the air boundary surface [m3/s-m2]. Default: 0.1 corresponds '\n 'to average indoor air speeds of 0.1 m/s (roughly 20 fpm), which is '\n 'typical of what would be induced by a HVAC system.'\n )\n\n air_mixing_schedule: str = Field(\n ...,\n min_length=1,\n max_length=100,\n description='Identifier of a fractional schedule for the air mixing schedule '\n 'across the construction.'\n )\n\n\nclass AirBoundaryConstruction(AirBoundaryConstructionAbridged):\n \"\"\"Construction for Air Boundary objects.\"\"\"\n\n type: constr(regex='^AirBoundaryConstruction$') = 'AirBoundaryConstruction'\n\n air_mixing_schedule: Union[ScheduleRuleset, ScheduleFixedInterval] = Field(\n ...,\n description='A fractional schedule as a ScheduleRuleset or '\n 'ScheduleFixedInterval for the air mixing schedule across '\n 'the construction.'\n )\n\n class Config:\n @staticmethod\n def schema_extra(schema, model):\n schema['properties']['air_mixing_schedule']['anyOf'] = [\n {\"$ref\": \"#/components/schemas/ScheduleRuleset\"},\n {\"$ref\": \"#/components/schemas/ScheduleFixedInterval\"}\n ]\n\n\nif __name__ == '__main__':\n print(WindowConstructionAbridged.schema_json(indent=2))\n print(WindowConstruction.schema_json(indent=2))\n print(OpaqueConstructionAbridged.schema_json(indent=2))\n print(OpaqueConstruction.schema_json(indent=2))\n print(ShadeConstruction.schema_json(indent=2))\n print(AirBoundaryConstruction.schema_json(indent=2))","sub_path":"honeybee_schema/energy/construction.py","file_name":"construction.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"287423940","text":"__author__ = \"Max Dippel, Michael Burkart and Matthias Urban\"\n__version__ = \"0.0.1\"\n__license__ = \"BSD\"\n\nfrom autoPyTorch.pipeline.base.pipeline_node import PipelineNode\nfrom autoPyTorch.utils.config.config_option import ConfigOption, to_bool\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nimport numpy as np\nimport scipy.sparse\n\nclass OneHotEncoding(PipelineNode):\n def __init__(self):\n super(OneHotEncoding, self).__init__()\n self.encode_Y = False\n\n def fit(self, pipeline_config, X, Y, dataset_info):\n categorical_features = dataset_info.categorical_features\n ohe = OneHotEncoder(categories=\"auto\", sparse=False, handle_unknown=\"ignore\")\n 
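# encode only the categorical feature columns; remainder=\"passthrough\" leaves the numeric columns unchanged\n 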
encoder = ColumnTransformer(transformers=[(\"ohe\", ohe, [i for i, f in enumerate(categorical_features) if f])], remainder=\"passthrough\")\n encoder.categories_ = np.array([])\n encoder.categorical_features = categorical_features\n\n if any(categorical_features) and not dataset_info.is_sparse:\n # encode X\n X = encoder.fit_transform(X)\n encoder.categories_ = encoder.transformers_[0][1].categories_\n\n # Y to matrix\n Y, y_encoder = self.complete_y_tranformation(Y)\n\n dataset_info.categorical_features = None\n return {'X': X, 'one_hot_encoder': encoder, 'Y': Y, 'y_one_hot_encoder': y_encoder, 'dataset_info': dataset_info}\n\n def predict(self, pipeline_config, X, one_hot_encoder):\n categorical_features = pipeline_config[\"categorical_features\"]\n if categorical_features and any(categorical_features) and not scipy.sparse.issparse(X):\n X = one_hot_encoder.transform(X)\n return {'X': X, 'one_hot_encoder': one_hot_encoder}\n \n def reverse_transform_y(self, Y, y_one_hot_encoder):\n if y_one_hot_encoder is None:\n return Y\n return y_one_hot_encoder.categories_[0][np.argmax(Y, axis=1)].reshape(-1, 1)\n \n def transform_y(self, Y, y_one_hot_encoder):\n if y_one_hot_encoder is None:\n return Y\n return y_one_hot_encoder.transform(Y.reshape(-1, 1))\n \n def complete_y_tranformation(self, Y):\n # Y to matrix\n y_encoder = None\n Y = Y.astype(np.float32)\n if len(Y.shape) == 1:\n Y = Y.reshape(-1, 1)\n\n # encode Y\n if self.encode_Y:\n y_encoder = OneHotEncoder(sparse=False, categories=\"auto\", handle_unknown='ignore')\n y_encoder.categories_ = np.array([])\n Y = y_encoder.fit_transform(Y)\n return Y, y_encoder","sub_path":"autoPyTorch/pipeline/nodes/one_hot_encoding.py","file_name":"one_hot_encoding.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"540372441","text":"import json\nfrom cyclus.lib import Hdf5Back\nimport uuid\nimport csv\nfrom pyne import nucname\n\n'''\nThis Object is very customize right now !!!! 
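The attribute getters below hard-code list positions (for example facility[2]) from one specific Cyclus input file, so other inputs need matching changes.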
\n'''\n\n\n# This class is defined to extract the attribute value from input json file\nclass JSONReader:\n def __init__(self, data):\n self.data = data\n\n def setData(self, data):\n self.data = data\n\n def getReactorAttribute(self, att):\n return self.data['simulation']['facility'][2]['config']['Reactor'][att]\n\n def getControlAttribute(self, att):\n return self.data['simulation']['control'][att]\n\n def getInstitutionAttribute(self, att):\n return self.data['simulation']['region']['institution']['config']['DeployInst'][att]['val'][2]\n\n def getAttribute(self):\n attributes = []\n attributes.append(self.getReactorAttribute('refuel_time'))\n attributes.append(self.getReactorAttribute('cycle_time'))\n attributes.append(self.getReactorAttribute('power_cap'))\n attributes.append(self.getControlAttribute('duration'))\n attributes.append(self.getInstitutionAttribute('build_times'))\n attributes.append(self.getInstitutionAttribute('lifetimes'))\n return attributes\n\n\nclass HDF5Reader:\n def __init__(self, db):\n self.db = db\n\n def setDB(self, db):\n self.db = db\n\n def getU235(self):\n pass\n\n def getTotalPower(self):\n TimeSeriesPower = db.query('TimeSeriesPower')\n SUM = 0\n for i, sid in enumerate(TimeSeriesPower.SimId):\n SUM += TimeSeriesPower.loc[i].Value\n return SUM\n def getTables(self):\n return db.tables\n\n\n######################################################################################################\n\noutput = [['refuel_time', 'cycle_time', 'power_cap', 'duration', 'build_times', 'lifetimes', 'Totalpower']]\nfor i in range(0, 100):\n with open('new' + str(i) + '.json', 'r') as f:\n data = json.load(f)\n db = Hdf5Back('out' + str(i) + '.h5')\n\n jsonreader = JSONReader(data)\n hdfread = HDF5Reader(db)\n tables = hdfread.getTables()\n\n entry = jsonreader.getAttribute()\n if \"TimeSeriesPower\" in tables:\n entry.append(hdfread.getTotalPower())\n else:\n entry.append(0)\n output.append(entry)\n\nwith open('output.csv', 'w') as f:\n write = csv.writer(f)\n for entry in output:\n write.writerow(entry)\n","sub_path":"input/DataParser.py","file_name":"DataParser.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"425097335","text":"import subprocess\nimport random\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\n\n# Never hardcode your password!\nsudo_pass = '45AM15lr3990'\ninterface = 'wlx74da38ef3fbd'\n# interface = 'wlp59s0'\n# iwlist cmd\ncmd = 'sudo iwlist ' + interface + ' scan'\n\namur = '34:41:5D:9E:EF:2B'\ntaswlan = '80:1F:02:EE:3B:0F'\n\nlab_vorne_file = 'lab_vorne.csv'\nlab_hinten_file = 'lab_hinten.csv'\nlab_file = 'lab.csv'\ngang_lab_file = 'gang_lab.csv'\ngang_hinten_file = 'gang_hinten.csv'\ngang_notaus_file = 'gang_notaus.csv'\ntreppen_file = 'treppen.csv'\n\n\nbssids = np.loadtxt('bssids.csv', dtype=str)\n\nlab_vorne = np.loadtxt(lab_vorne_file, dtype=float)\nlab_hinten = np.loadtxt(lab_hinten_file, dtype=float)\n# lab = np.loadtxt(lab_file, dtype=float)\ngang_lab = np.loadtxt(gang_lab_file, dtype=float)\ngang_hinten = np.loadtxt(gang_hinten_file, dtype=float)\ngang_notaus = np.loadtxt(gang_notaus_file, dtype=float)\ntreppen = np.loadtxt(treppen_file, dtype=float)\n\ndata = lab_vorne\ndata = np.append(data, lab_hinten, axis=0)\n# data = np.append(data, np.loadtxt(lab_file, dtype=float), axis=0)\ndata = 
np.append(data, gang_lab, axis=0)\ndata = np.append(data, gang_hinten, axis=0)\ndata = np.append(data, gang_notaus, axis=0)\ndata = np.append(data, treppen, axis=0)\n\nlbls = np.array(['lab_vorne']*lab_vorne.shape[0] + ['lab_hinten']*lab_hinten.shape[0] + ['gang_lab']*gang_lab.shape[0] + ['gang_hinten']*gang_hinten.shape[0] + ['gang_notaus']*gang_notaus.shape[0] + ['treppen']*treppen.shape[0])\n# lbls = np.array(['lab']*(lab_vorne.shape[0] + lab_hinten.shape[0]) + ['gang']*(gang_lab.shape[0] + gang_hinten.shape[0] + gang_notaus.shape[0]) + ['treppen']*treppen.shape[0])\ndata = scale(data)\nprint('data.shape:', data.shape)\n# random.seed(2)\ntest = data[0].reshape(1, -1)\nnp.delete(data, 0)\ntest_lbl = [lbls[0]]\nnp.delete(lbls, 0)\nfor i in range(599):\n r = random.randint(0, data.shape[0]-1)\n test = np.append(test, data[r].reshape(1, -1), axis=0)\n np.delete(data, r)\n test_lbl.append(lbls[r])\n np.delete(lbls, r)\n\n# random.seed(3)\n# layers = (random.randint(9, 900), random.randint(9, 900), random.randint(9, 900))\nlayers = (160, 61)\nprint('layers', layers)\nmlp = MLPClassifier(hidden_layer_sizes=layers, activation='relu', solver='lbfgs', alpha=0.1, verbose=False)\nmlp.fit(data, lbls)\n\n\npca = PCA(n_components=4)\npca.fit(data)\ndata_pca = pca.transform(data)\npca_knn = KNeighborsClassifier(n_neighbors=3)\npca_knn.fit(data_pca, lbls)\ntest_pca = pca.transform(test)\n# pca_score.append(round(pca_knn.score(test_pca, test_lbl), 3))\n\n\nwhile input('Press enter to collect sample') != 'q':\n print('Collecting wifi sample')\n for i in range(3):\n cmd1 = subprocess.Popen(['echo',sudo_pass], stdout=subprocess.PIPE)\n cmd2 = subprocess.Popen(['sudo','-S'] + cmd.split(), stdin=cmd1.stdout, stdout=subprocess.PIPE)\n wifi = cmd2.stdout.read().decode().lower()\n\n # Retreive MAC and signal strength\n wifi = wifi.split('cell')\n dct = {}\n for w in wifi:\n for line in w.split('\\n'):\n words = line.split(' ')\n if 'address:' in words:\n mac = words[-1]\n if 'signal' in words:\n strength = words[-4].split('=')[-1]\n dct[mac] = strength\n \n signal = np.zeros_like(bssids, dtype=int)\n for j, bssid in enumerate(bssids):\n try:\n signal[j] = dct[bssid]\n except KeyError:\n pass\n \n # signal_pca = pca.transform(signal.reshape(1, -1))\n # print('PCA_KNN:', pca_knn.predict(signal_pca))\n print('KNN:', pca_knn.predict(pca.transform(scale(signal.reshape(1, -1)))))\n print('MLP:', mlp.predict(signal.reshape(1, -1)))\n","sub_path":"data_bu/predict_pca_knn.py","file_name":"predict_pca_knn.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"463184012","text":"import urllib.request\nimport urllib.parse\nimport json\n\nclass ClientRequest:\n def __init__(self):\n self.__ApiUserId = 'U548163'\n self.__token = 'yYkf3y1P7CeRV3i9C89cMXgjpq3aN0Qt'\n self.__host = 'http://52.83.191.213:8081/'\n\n\n def url(self, url):\n return self.__host + url\n\n def mergeToken(self, submit_data):\n submit_data['ApiUserId'] = self.__ApiUserId\n submit_data['token'] = self.__token\n return submit_data\n\n def post(self, url, submit_data):\n submit_data = urllib.parse.urlencode(submit_data)\n submit_data = submit_data.encode('utf-8')\n request = urllib.request.Request(url)\n request.add_header(\"Content-Type\",\"application/x-www-form-urlencoded;charset=utf-8\")\n res = urllib.request.urlopen(request, submit_data)\n return res.read().decode('utf-8')\n \n def postJson(self, url, submit_data):\n submit_data = json.dumps(submit_data)\n 
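# A minimal standalone sketch (not part of ClientRequest.py): the bare-urllib
# JSON POST that postJson() performs. The URL is a placeholder, not the real
# service endpoint, so the network call is left commented out.
import json
import urllib.request

demo_payload = json.dumps({'touser': 'someone', 'text': 'hello'}).encode('utf-8')
demo_req = urllib.request.Request('http://example.com/v1/echo')
demo_req.add_header('Content-Type', 'application/json;charset=utf-8')
# demo_res = urllib.request.urlopen(demo_req, demo_payload)  # no live endpoint assumed
# print(demo_res.read().decode('utf-8'))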
submit_data = bytes(submit_data, 'utf8')\n request = urllib.request.Request(url)\n request.add_header(\"Content-Type\",\"application/json;charset=utf-8\")\n res = urllib.request.urlopen(request, submit_data)\n return res.read().decode('utf-8')\n \n def postGo(self, url, submit_data):\n url = self.url(url)\n submit_data = self.mergeToken(submit_data)\n return self.post(url, submit_data)\n\n def postXML(self, url, submit_data=''):\n \n request = urllib.request.Request(url)\n request.add_header(\"Content-Type\",\"application/xml;charset=utf-8\")\n submit_data = submit_data.replace('\\r', '').replace('\\n', '')\n res = urllib.request.urlopen(request, submit_data.encode('utf-8'))\n res = res.read().decode('utf-8')\n\n return res\n\nif __name__ == '__main__':\n clientRequest = ClientRequest()\n print(clientRequest.postGo('v1/Dingding/SendText', {'touser': '091716111036380986', 'text': 'aaa'}))\n","sub_path":"python/ClientRequest.py","file_name":"ClientRequest.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"582458256","text":"import copy\nimport json\nimport logging\nimport os\n\nfrom lib import InferDeepgrow, MyStrategy, TrainDeepgrow\nfrom monai.apps import load_from_mmar\n\nfrom monailabel.interfaces import MONAILabelApp\nfrom monailabel.utils.activelearning import Random\nfrom monailabel.utils.infer.deepgrow_pipeline import InferDeepgrowPipeline\n\nlogger = logging.getLogger(__name__)\n\n\nclass MyApp(MONAILabelApp):\n def __init__(self, app_dir, studies):\n self.model_dir = os.path.join(app_dir, \"model\")\n\n self.model_dir_2d = os.path.join(self.model_dir, \"deepgrow_2d\")\n self.final_model_2d = os.path.join(self.model_dir, \"deepgrow_2d\", \"model.pt\")\n self.train_stats_path_2d = os.path.join(self.model_dir, \"deepgrow_2d\", \"train_stats.json\")\n self.mmar_2d = \"clara_pt_deepgrow_2d_annotation_1\"\n\n self.model_dir_3d = os.path.join(self.model_dir, \"deepgrow_3d\")\n self.final_model_3d = os.path.join(self.model_dir, \"deepgrow_3d\", \"model.pt\")\n self.train_stats_path_3d = os.path.join(self.model_dir, \"deepgrow_3d\", \"train_stats.json\")\n self.mmar_3d = \"clara_pt_deepgrow_3d_annotation_1\"\n\n super().__init__(app_dir, studies)\n\n def init_infers(self):\n infers = {\n \"deepgrow_2d\": InferDeepgrow(self.final_model_2d, load_from_mmar(self.mmar_2d, self.model_dir_2d)),\n \"deepgrow_3d\": InferDeepgrow(\n self.final_model_3d,\n load_from_mmar(self.mmar_3d, self.model_dir_3d),\n dimension=3,\n model_size=(128, 192, 192),\n ),\n }\n\n infers[\"deepgrow_pipeline\"] = InferDeepgrowPipeline(\n path=None,\n network=load_from_mmar(self.mmar_2d, self.model_dir_2d),\n model_3d=infers[\"deepgrow_3d\"],\n description=\"Combines Deepgrow 2D model and 3D deepgrow model\",\n )\n return infers\n\n def init_strategies(self):\n return {\n \"random\": Random(),\n \"first\": MyStrategy(),\n }\n\n def train(self, request):\n logger.info(f\"Training request: {request}\")\n\n model = request.get(\"model\", \"deepgrow_2d\")\n models = [\"deepgrow_2d\", \"deepgrow_3d\"] if model == \"all\" else [model]\n logger.info(f\"Selected models for training: {models}\")\n\n tasks = []\n for model in models:\n logger.info(f\"Creating Training task for model: {model}\")\n\n if model == \"deepgrow_2d\":\n mmar = self.mmar_2d\n model_dir = self.model_dir_2d\n final_model = self.final_model_2d\n train_stats_path = self.train_stats_path_2d\n else:\n mmar = self.mmar_3d\n model_dir = self.model_dir_3d\n final_model 
= self.final_model_3d\n train_stats_path = self.train_stats_path_3d\n\n output_dir = os.path.join(model_dir, request.get(\"name\", \"model_01\"))\n\n # App Owner can decide which checkpoint to load (from existing output folder or from base checkpoint)\n load_path = os.path.join(output_dir, \"model.pt\")\n if not os.path.exists(load_path) and request.get(\"pretrained\", True):\n load_path = None\n network = load_from_mmar(mmar, model_dir)\n else:\n network = load_from_mmar(mmar, model_dir, pretrained=False)\n\n # Datalist for train/validation\n train_d, val_d = self.partition_datalist(self.datastore().datalist(), request.get(\"val_split\", 0.2))\n\n if model == \"deepgrow_3d\":\n task = TrainDeepgrow(\n dimension=3,\n roi_size=(128, 192, 192),\n model_size=(128, 192, 192),\n max_train_interactions=15,\n max_val_interactions=20,\n output_dir=output_dir,\n train_datalist=train_d,\n val_datalist=val_d,\n network=network,\n load_path=load_path,\n publish_path=final_model,\n stats_path=train_stats_path,\n device=request.get(\"device\", \"cuda\"),\n lr=request.get(\"lr\", 0.0001),\n max_epochs=request.get(\"epochs\", 1),\n amp=request.get(\"amp\", True),\n train_batch_size=request.get(\"train_batch_size\", 1),\n val_batch_size=request.get(\"val_batch_size\", 1),\n )\n elif model == \"deepgrow_2d\":\n flatten_train_d = []\n for _ in range(max(request.get(\"2d_train_random_slices\", 20), 1)):\n flatten_train_d.extend(copy.deepcopy(train_d))\n logger.info(f\"After flatten:: {len(train_d)} => {len(flatten_train_d)}\")\n\n flatten_val_d = []\n for _ in range(max(request.get(\"2d_val_random_slices\", 5), 1)):\n flatten_val_d.extend(copy.deepcopy(val_d))\n logger.info(f\"After flatten:: {len(val_d)} => {len(flatten_val_d)}\")\n\n task = TrainDeepgrow(\n dimension=2,\n roi_size=(256, 256),\n model_size=(256, 256),\n max_train_interactions=15,\n max_val_interactions=5,\n output_dir=output_dir,\n train_datalist=flatten_train_d,\n val_datalist=flatten_val_d,\n network=network,\n load_path=load_path,\n publish_path=final_model,\n stats_path=train_stats_path,\n device=request.get(\"device\", \"cuda\"),\n lr=request.get(\"lr\", 0.0001),\n max_epochs=request.get(\"2d_epochs\", 1),\n amp=request.get(\"amp\", True),\n train_batch_size=request.get(\"2d_train_batch_size\", 4),\n val_batch_size=request.get(\"2d_val_batch_size\", 4),\n )\n else:\n raise Exception(f\"Train Definition for {model} Not Found\")\n\n tasks.append(task)\n\n logger.info(f\"Total Train tasks to run: {len(tasks)}\")\n result = None\n for task in tasks:\n result = task()\n return result\n\n def train_stats(self):\n # Return both 2D and 3D stats. 
Set current running or deepgrow_3d stats as active\n res = {}\n active = {}\n start_ts = 0\n for model in [\"deepgrow_3d\", \"deepgrow_2d\"]:\n train_stats_path = os.path.join(self.model_dir, model, \"train_stats.json\")\n if os.path.exists(train_stats_path):\n with open(train_stats_path, \"r\") as fc:\n r = json.load(fc)\n res[model] = r\n\n # Set current running or last ran model as active\n if not active or r.get(\"current_time\") or r.get(\"start_ts\", 0) > start_ts:\n start_ts = r.get(\"start_ts\", 0)\n active = copy.deepcopy(r)\n\n active.update(res)\n return active\n","sub_path":"sample-apps/generic_deepgrow/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"265549127","text":"##############################################################################\n#\n# Copyright (c) 2004-2008 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Test harness for the test runner itself.\n\"\"\"\nfrom __future__ import print_function\n\nimport re\nimport gc\nimport os\nimport sys\nimport unittest\n\nimport doctest\nfrom zope.testing import renormalizing\n\n\n#separated checkers for the different platform,\n#because it s...s to maintain just one\nif sys.platform == 'win32':\n checker = renormalizing.RENormalizing([\n # 2.5 changed the way pdb reports exceptions\n (re.compile(r\":\"),\n r'exceptions.\\1Error:'),\n\n #rewrite pdb prompt to ... the current location\n #windows, py2.4 pdb seems not to put the '>' on doctest locations\n #therefore we cut it here\n (re.compile('^> doctest[^\\n]+->None$', re.M), '...->None'),\n\n #rewrite pdb prompt to ... 
the current location\n (re.compile('^> [^\\n]+->None$', re.M), '> ...->None'),\n\n (re.compile(r\"\"),(r'?')),\n (re.compile(r\":\"),\n r'exceptions.\\1Error:'),\n\n # testtools content formatter is used to mime-encode\n # tracebacks when the SubunitOutputFormatter is used, and the\n # resulting text includes a size which can vary depending on\n # the path included in the traceback.\n (re.compile(r'traceback\\n[A-F\\d]+', re.MULTILINE),\n r'traceback\\nNNN'),\n\n (re.compile(\"'[A-Za-z]:\\\\\\\\\"), \"'\"), # hopefully, we'll make Windows happy\n # replaces drives with nothing\n\n (re.compile(r'\\\\\\\\'), '/'), # more Windows happiness\n # double backslashes in coverage???\n\n (re.compile(r'\\\\'), '/'), # even more Windows happiness\n # replaces backslashes in paths\n\n (re.compile(r'/r$', re.MULTILINE), '\\\\r'), # undo some of that\n\n #this is a magic to put linefeeds into the doctest\n (re.compile('##r##\\n'), '\\r'),\n\n (re.compile(r'\\d+[.]\\d\\d\\d seconds'), 'N.NNN seconds'),\n (re.compile(r'\\d+[.]\\d\\d\\d s'), 'N.NNN s'),\n (re.compile(r'\\d+[.]\\d\\d\\d{'), 'N.NNN{'),\n (re.compile(r'\\d{4}-\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d\\.\\d+'),\n 'YYYY-MM-DD HH:MM:SS.mmmmmm'),\n (re.compile('( |\")[^\\n]+testrunner-ex'), r'\\1testrunner-ex'),\n (re.compile('( |\")[^\\n]+testrunner.py'), r'\\1testrunner.py'),\n (re.compile(r'> [^\\n]*(doc|unit)test[.]py\\(\\d+\\)'),\n r'\\1test.py(NNN)'),\n (re.compile(r'[.]py\\(\\d+\\)'), r'.py(NNN)'),\n (re.compile(r'[.]py:\\d+'), r'.py:NNN'),\n (re.compile(r' line \\d+,', re.IGNORECASE), r' Line NNN,'),\n (re.compile(r' line {([a-z]+)}\\d+{', re.IGNORECASE), r' Line {\\1}NNN{'),\n\n # omit traceback entries for unittest.py or doctest.py (and\n # their package variants) from output:\n (re.compile(r'^ +File \"[^\\n]*(doctest|unittest|case)(/__init__)?.py\", [^\\n]+\\n[^\\n]+\\n',\n re.MULTILINE),\n r''),\n (re.compile(r'^{\\w+} +File \"{\\w+}[^\\n]*(doctest|unittest|case)(/__init__)?.py{\\w+}\", [^\\n]+\\n[^\\n]+\\n',\n re.MULTILINE),\n r''),\n #(re.compile('^> [^\\n]+->None$', re.M), '> ...->None'),\n (re.compile('import pdb; pdb'), 'Pdb()'), # Py 2.3\n\n # Python 3 exceptions are from the builtins module\n (re.compile(r'builtins\\.(SyntaxError|TypeError)'),\n r'exceptions.\\1'),\n\n # Python 3.3 has better exception messages\n (re.compile(\"ImportError: No module named '(?:[^']*[.])?([^'.]*)'\"),\n r'ImportError: No module named \\1'),\n\n # PyPy has different exception messages too\n (re.compile(\"ImportError: No module named (?:[a-zA-Z_0-9.]*[.])?([a-zA-Z_0-9]*)\"),\n r'ImportError: No module named \\1'),\n (re.compile(\"NameError: global name '([^']*)' is not defined\"),\n r\"NameError: name '\\1' is not defined\"),\n\n ])\nelse:\n #*nix\n checker = renormalizing.RENormalizing([\n # 2.5 changed the way pdb reports exceptions\n (re.compile(r\":\"),\n r'exceptions.\\1Error:'),\n\n #rewrite pdb prompt to ... 
the current location\n (re.compile('^> [^\\n]+->None$', re.M), '> ...->None'),\n\n (re.compile(r\"\"),(r'?')),\n (re.compile(r\":\"),\n r'exceptions.\\1Error:'),\n\n #this is a magic to put linefeeds into the doctest\n #on win it takes one step, linux is crazy about the same...\n (re.compile('##r##'), r'\\r'),\n (re.compile(r'\\r'), '\\\\\\\\r\\n'),\n\n (re.compile(r'\\d+[.]\\d\\d\\d seconds'), 'N.NNN seconds'),\n (re.compile(r'\\d+[.]\\d\\d\\d s'), 'N.NNN s'),\n (re.compile(r'\\d+[.]\\d\\d\\d{'), 'N.NNN{'),\n (re.compile(r'\\d{4}-\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d\\.\\d+'),\n 'YYYY-MM-DD HH:MM:SS.mmmmmm'),\n (re.compile('( |\"|\\')[^\\'\\n]+testrunner-ex'), r'\\1testrunner-ex'),\n (re.compile('( |\"|\\')[^\\'\\n]+testrunner.py'), r'\\1testrunner.py'),\n (re.compile(r'> [^\\n]*(doc|unit)test[.]py\\(\\d+\\)'),\n r'\\1test.py(NNN)'),\n (re.compile(r'[.]py\\(\\d+\\)'), r'.py(NNN)'),\n (re.compile(r'[.]py:\\d+'), r'.py:NNN'),\n (re.compile(r' line \\d+,', re.IGNORECASE), r' Line NNN,'),\n (re.compile(r' line {([a-z]+)}\\d+{', re.IGNORECASE), r' Line {\\1}NNN{'),\n\n # testtools content formatter is used to mime-encode\n # tracebacks when the SubunitOutputFormatter is used, and the\n # resulting text includes a size which can vary depending on\n # the path included in the traceback.\n (re.compile(r'traceback\\n[A-F\\d]+', re.MULTILINE),\n r'traceback\\nNNN'),\n\n # omit traceback entries for unittest.py or doctest.py (and\n # their package variants) from output:\n (re.compile(r'^ +File \"[^\\n]*(doctest|unittest|case)(/__init__)?.py\", [^\\n]+\\n[^\\n]+\\n',\n re.MULTILINE),\n r''),\n (re.compile(r'^{\\w+} +File \"{\\w+}[^\\n]*(doctest|unittest|case)(/__init__)?.py{\\w+}\", [^\\n]+\\n[^\\n]+\\n',\n re.MULTILINE),\n r''),\n (re.compile('import pdb; pdb'), 'Pdb()'), # Py 2.3\n\n # Python 3 exceptions are from the builtins module\n (re.compile(r'builtins\\.(SyntaxError|TypeError)'),\n r'exceptions.\\1'),\n\n # Python 3.3 has better exception messages\n (re.compile(\"ImportError: No module named '(?:[^']*[.])?([^'.]*)'\"),\n r'ImportError: No module named \\1'),\n\n # PyPy has different exception messages too\n (re.compile(\"ImportError: No module named (?:[a-zA-Z_0-9.]*[.])?([a-zA-Z_0-9]*)\"),\n r'ImportError: No module named \\1'),\n (re.compile(\"NameError: global name '([^']*)' is not defined\"),\n r\"NameError: name '\\1' is not defined\"),\n\n ])\n\ndef setUp(test):\n test.globs['print_function'] = print_function\n test.globs['saved-sys-info'] = (\n sys.path[:],\n sys.argv[:],\n sys.modules.copy(),\n )\n if hasattr(gc, 'get_threshold'):\n test.globs['saved-gc-threshold'] = gc.get_threshold()\n test.globs['this_directory'] = os.path.split(__file__)[0]\n test.globs['testrunner_script'] = sys.argv[0]\n\n\ndef tearDown(test):\n sys.path[:], sys.argv[:] = test.globs['saved-sys-info'][:2]\n if hasattr(gc, 'get_threshold'):\n gc.set_threshold(*test.globs['saved-gc-threshold'])\n sys.modules.clear()\n sys.modules.update(test.globs['saved-sys-info'][2])\n\n\ndef test_suite():\n suites = [\n doctest.DocFileSuite(\n 'testrunner-arguments.txt',\n 'testrunner-coverage.txt',\n 'testrunner-debugging-layer-setup.test',\n 'testrunner-debugging-import-failure.test',\n 'testrunner-debugging-nonprintable-exc.test',\n 'testrunner-debugging.txt',\n 'testrunner-edge-cases.txt',\n 'testrunner-errors.txt',\n 'testrunner-layers-api.txt',\n 'testrunner-layers-instances.txt',\n 'testrunner-layers-buff.txt',\n 'testrunner-subprocess-errors.txt',\n 'testrunner-layers-cantfind.txt',\n 'testrunner-layers-cwd.txt',\n 
'testrunner-layers-ntd.txt',\n 'testrunner-layers-topological-sort.txt',\n 'testrunner-layers.txt',\n 'testrunner-progress.txt',\n 'testrunner-colors.txt',\n 'testrunner-simple.txt',\n 'testrunner-nestedcode.txt',\n 'testrunner-test-selection.txt',\n 'testrunner-verbose.txt',\n 'testrunner-repeat.txt',\n 'testrunner-knit.txt',\n 'testrunner-shuffle.txt',\n 'testrunner-eggsupport.txt',\n 'testrunner-stops-when-stop-on-error.txt',\n setUp=setUp, tearDown=tearDown,\n optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n checker=checker),\n doctest.DocTestSuite('zope.testrunner'),\n doctest.DocTestSuite('zope.testrunner.coverage',\n optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE),\n doctest.DocTestSuite('zope.testrunner.options'),\n doctest.DocTestSuite('zope.testrunner.find'),\n ]\n\n # PyPy uses a different garbage collector\n if hasattr(gc, 'get_threshold'):\n suites.append(\n doctest.DocFileSuite(\n 'testrunner-gc.txt',\n setUp=setUp, tearDown=tearDown,\n optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n checker=checker))\n\n # PyPy does not support sourceless imports, apparently (tried version 1.9)\n if 'PyPy' not in sys.version:\n suites.append(\n doctest.DocFileSuite(\n 'testrunner-wo-source.txt',\n setUp=setUp, tearDown=tearDown,\n optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n checker=checker))\n\n if sys.platform == 'win32':\n suites.append(\n doctest.DocFileSuite(\n 'testrunner-coverage-win32.txt',\n setUp=setUp, tearDown=tearDown,\n optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n checker=checker))\n\n # Python <= 2.4.1 had a bug that prevented hotshot from running in\n # non-optimize mode\n if sys.version_info[:3] > (2,4,1) or not __debug__:\n # some Linux distributions don't include the profiling module (which\n # hotshot.stats depends on)\n try:\n import hotshot.stats\n except ImportError:\n pass\n else:\n suites.append(\n doctest.DocFileSuite(\n 'testrunner-profiling.txt',\n setUp=setUp, tearDown=tearDown,\n optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n checker = renormalizing.RENormalizing([\n (re.compile(r'tests_profile[.]\\S*[.]prof'),\n 'tests_profile.*.prof'),\n ]),\n )\n )\n try:\n import cProfile\n import pstats\n except ImportError:\n pass\n else:\n suites.append(\n doctest.DocFileSuite(\n 'testrunner-profiling-cprofiler.txt',\n setUp=setUp, tearDown=tearDown,\n optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n checker = renormalizing.RENormalizing([\n (re.compile(r'tests_profile[.]\\S*[.]prof'),\n 'tests_profile.*.prof'),\n ]),\n )\n )\n\n skip_feature = True\n if sys.version_info < (2, 7, 0):\n try:\n import unittest2\n except ImportError:\n skip_feature = False\n\n if skip_feature:\n suites.append(\n doctest.DocFileSuite(\n 'testrunner-report-skipped.txt',\n setUp=setUp, tearDown=tearDown,\n optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n checker=checker)\n )\n\n if hasattr(sys, 'gettotalrefcount'):\n suites.append(\n doctest.DocFileSuite(\n 'testrunner-leaks.txt',\n setUp=setUp, tearDown=tearDown,\n optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n checker = renormalizing.RENormalizing([\n (re.compile(r'\\d+[.]\\d\\d\\d seconds'), 'N.NNN seconds'),\n (re.compile(r'sys refcount=\\d+ +change=\\d+'),\n 'sys refcount=NNNNNN change=NN'),\n (re.compile(r'sum detail refcount=\\d+ +'),\n 'sum detail refcount=NNNNNN '),\n (re.compile(r'total +\\d+ +\\d+'),\n 'total NNNN NNNN'),\n (re.compile(r\"^ +(int|type) +-?\\d+ +-?\\d+ *\\n\", re.M),\n ''),\n ]),\n\n )\n )\n else:\n 
suites.append(\n            doctest.DocFileSuite(\n                'testrunner-leaks-err.txt',\n                setUp=setUp, tearDown=tearDown,\n                optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n                checker=checker,\n                )\n            )\n\n    try:\n        import subunit\n    except ImportError:\n        suites.append(\n            doctest.DocFileSuite(\n                'testrunner-subunit-err.txt',\n                setUp=setUp, tearDown=tearDown,\n                optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE,\n                checker=checker))\n    else:\n        suites.append(\n            doctest.DocFileSuite(\n                'testrunner-subunit.txt',\n                setUp=setUp, tearDown=tearDown,\n                optionflags=doctest.ELLIPSIS +\n                doctest.NORMALIZE_WHITESPACE +\n                doctest.REPORT_NDIFF,\n                checker=checker))\n        if hasattr(sys, 'gettotalrefcount'):\n            suites.append(\n                doctest.DocFileSuite(\n                    'testrunner-subunit-leaks.txt',\n                    setUp=setUp, tearDown=tearDown,\n                    optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE,\n                    checker=checker))\n\n    if sys.version_info[:3] >= (2,7,0):\n        # Python 2.7 adds support for unittest.expectedFailure\n        suites.append(doctest.DocFileSuite(\n            'testrunner-unexpected-success.txt',\n            setUp=setUp, tearDown=tearDown,\n            optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,\n            checker=checker))\n\n    return unittest.TestSuite(suites)\n","sub_path":"src/zope/testrunner/tests/test_doctest.py","file_name":"test_doctest.py","file_ext":"py","file_size_in_byte":15069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"490887112","text":"\"\"\"\nmiddle 2021-12-14 2-D DP\nProblem: find the minimum top-to-bottom path sum. Define dp[i][j] as the minimum path sum from point (i,j) down to the bottom edge\n[i,j]\n[i+1,j] [i+1,j+1]\nhttps://leetcode-cn.com/problems/triangle/solution/di-gui-ji-yi-hua-dp-bi-xu-miao-dong-by-sweetiee/\nNote that when going bottom-up, dp[i] is derived from dp[i+1]!!!\n\"\"\"\nclass Solution(object):\n    def minimumTotal(self, triangle):\n        m = len(triangle) # m rows, m columns\n        dp = [[0]*(m+1) for _ in range(m+1)] # minimum path sum from point (i,j) to the bottom edge\n        # initialize the boundary\n        # for j in range(m):\n        #     dp[m-1][j] = triangle[m-1][j] # but triangle has no index=4\n        for i in range(m-1, -1, -1):\n            for j in range(i, -1, -1):\n                # shortest path reaching [i,j]\n                dp[i][j] = min(dp[i+1][j],dp[i+1][j+1])+triangle[i][j]\n            ## the recurrence starts from the last row of the triangle; the following loops work too\n            # for (int i = n - 1; i >= 0; i--) {\n            #     for (int j = 0; j <= i; j++) {\n        return dp[0][0]\n\n    # 2022-02-28\n    def minimumTotal_mine(self, triangle):\n        if not triangle or not triangle[-1]: return 0\n\n        m,n=len(triangle),len(triangle[-1])\n        dp = [[0]*n for _ in range(m)]\n\n        for i in range(n):\n            dp[m-1][i] = triangle[m-1][i]\n\n        for i in range(m-2,-1,-1): # m rows, n columns\n            for j in range(n-1,-1,-1):\n                if j>i:continue\n                dp[i][j] = min(dp[i+1][j], dp[i+1][j+1])+triangle[i][j]\n        return dp[0][0]\n\nif __name__ == '__main__':\n    # triangle = [[-10]]\n    triangle = [[2],[3,4],[6,5,7],[4,1,8,3]]\n# [2]\n# [3, 4]\n# [6, 5, 7]\n# [4, 1, 8, 3]\n    print(Solution().minimumTotal(triangle))","sub_path":"07_动态规划/2维DP/120-三角形最小路径和.py","file_name":"120-三角形最小路径和.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"339447171","text":"#coding=utf-8\n\nimport unittest\n\n\"\"\"\nFlatten Nested List Iterator\n\nGiven a nested list of integers, implement an iterator to flatten it.\n\nEach element is either an integer, or a list -- whose elements may also be integers or other lists.\n\n Notice\n\nYou don't need to implement the remove method.\n\nHave you met this question in a real interview?
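# A minimal standalone sketch (not part of either file): a check of the
# bottom-up triangle DP from the record above, using a rolling 1-D array
# instead of the (m+1) x (m+1) table; the test triangle is the one from
# that file.
def min_triangle_path(triangle):
    dp = triangle[-1][:]                      # start from the bottom row
    for i in range(len(triangle) - 2, -1, -1):
        for j in range(i + 1):
            dp[j] = min(dp[j], dp[j + 1]) + triangle[i][j]
    return dp[0]

assert min_triangle_path([[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]) == 11  # 2+3+5+1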
Yes\nExample\nGiven the list [[1,1],2,[1,1]], By calling next repeatedly until hasNext returns false, the order of elements returned \nby next should be: [1,1,2,1,1].\n\nGiven the list [1,[4,[6]]], By calling next repeatedly until hasNext returns false, the order of elements returned by \nnext should be: [1,4,6].\n\nTags \nStack Recursion Data Structure Design Snapchat Google\nRelated Problems \nMedium Flatten 2D Vector 46 %\nEasy Nested List Weight Sum 45 %\nMedium Zigzag Iterator II 30 %\nMedium Zigzag Iterator 42 %\nEasy Flatten Binary Tree to Linked List\n\n\"\"\"\n\n\n# \"\"\"\n# This is the interface that allows for creating nested lists.\n# You should not implement it, or speculate about its implementation\n# \"\"\"\n# class NestedInteger(object):\n# def isInteger(self):\n# \"\"\"\n# @return {boolean} True if this NestedInteger holds a single integer,\n# rather than a nested list.\n# \"\"\"\n#\n# def getInteger(self):\n# \"\"\"\n# @return {int} the single integer that this NestedInteger holds,\n# if it holds a single integer\n# Return None if this NestedInteger holds a nested list\n# \"\"\"\n#\n# def getList(self):\n# \"\"\"\n# @return {NestedInteger[]} the nested list that this NestedInteger holds,\n# if it holds a nested list\n# Return None if this NestedInteger holds a single integer\n# \"\"\"\n\nclass NestedIterator(object):\n def __init__(self, nestedList):\n # Initialize your data structure here.\n # if not nestedList:\n # raise Exception(\"Can't create iterator over None or empty inputs!\")\n # # ok to raise exception in lintcode, on leetcode just ignore\n self.data = []\n self.idx = 0\n _tmp = [ele for ele in nestedList]\n while _tmp:\n cur = _tmp.pop(0)\n if cur.isInteger():\n self.data.append(cur.getInteger())\n else:\n cur_list = cur.getList()\n for idx, ele in enumerate(cur_list):\n _tmp.insert(idx, ele)\n\n # @return {int} the next element in the iteration\n def next(self):\n # Write your code here\n if self.hasNext():\n self.idx += 1\n return self.data[self.idx - 1]\n\n # @return {boolean} true if the iteration has more element or false\n def hasNext(self):\n # Write your code here\n return self.idx < len(self.data)\n\n\n\n\nclass NestedIterator_wrong(object):\n \"\"\"\n Input\n [[1,1],2,[1,1]]\n Expected\n [1,1,2,1,1]\n Error Message\n Traceback (most recent call last): File \"Main.py\", line 12, in i, v = NestedIterator(nestedList), [] \n File \"NestedIterator.py\", line 38, in __init__ for idx, ele in enumerate(tmp): TypeError: 'NestedInteger' object is \n not iterable EXITCODE=1\n \n \"\"\"\n def __init__(self, nestedList):\n # Initialize your data structure here.\n self.data = []\n self.idx = -1\n while nestedList:\n tmp = nestedList.pop(0)\n if isinstance(tmp, int):\n self.data.append(tmp)\n else:\n for idx, ele in enumerate(tmp):\n nestedList.insert(idx, ele)\n\n # @return {int} the next element in the iteration\n def next(self):\n # Write your code here\n tmp = self.data.pop(0)\n return tmp\n\n # @return {boolean} true if the iteration has more element or false\n def hasNext(self):\n # Write your code here\n return len(self.data) > 0\n\n\n# Write your code here\n\n\n# Your NestedIterator object will be instantiated and called as such:\n# i, v = NestedIterator(nestedList), []\n# while i.hasNext(): v.append(i.next())\n\n\nclass SolutionTester(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_case2(self):\n nums = [4,[3,[2,[1]]]]\n answer = [4,3,2,1]\n result = self.sol.flatten(nums)\n self.assertEqual(answer, result)\n\n\n def 
test_case1(self):\n        nums = [1,2,[1,2]]\n        answer = [1,2,1,2]\n        result = self.sol.flatten(nums)\n        self.assertEqual(answer, result)\n\n\ndef main():\n    suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n    unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n#-*- coding:utf-8 -*-\n","sub_path":"freq/flatten_nested_list_iterator.py","file_name":"flatten_nested_list_iterator.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"78112769","text":"\"\"\"\nQuestion: Write a program that accepts a comma separated sequence of words as input and prints the words \nin a comma-separated sequence after sorting them alphabetically. Suppose the following input is supplied \nto the program: without,hello,bag,world Then, the output should be: bag,hello,without,world\n\"\"\"\ndef main():\n    strs_list = [x for x in input().split(\",\")]\n    strs = sorted(strs_list)\n    print(\",\".join(strs))\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"programming-exercises/08-列表排序.py","file_name":"08-列表排序.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"137140055","text":"#\n#-----------------------------------------------------------------------------\n# This source file is part of Terminal_G33k\n# Copyright (c) 2005 The Terminal_G33k Team\n# Also see acknowledgements in Readme.txt\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 2.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
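# A minimal standalone sketch (not part of SConstruct): a self-contained check
# of the comma-sort exercise in the record above, with the sample input
# hardcoded instead of read from stdin.
words = "without,hello,bag,world".split(",")
assert ",".join(sorted(words)) == "bag,hello,without,world"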
See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n# or go to http://www.gnu.org/copyleft/gpl.txt\n# -----------------------------------------------------------------------------\n\n\nimport glob\n\nengine_files = glob.glob(\"engine/*.cc\")\ngame_files = glob.glob(\"game/*.cc\")\n\ntglib_dir = ['engine/tglib','engine/tglib/math','engine/tglib/bvolume','engine/tglib/containers','engine/tglib/stl']\n\n#tglib file list creation from tglib_dir\ntglib_files = [glob.glob(dir+\"/*.cc\") for dir in tglib_dir]\n\n\n#command-line options parsing\noptions = {}\noptions['debug'] = ARGUMENTS.get('debug',1)\noptions['optimize'] = ARGUMENTS.get('optimize',0)\noptions['profile'] = ARGUMENTS.get('profile',0)\n\nif int(options['optimize']):\n\toptions['debug'] = 0\n\n#options specific flags\nopt_cflags = ' -Wall '\nopt_ldflags = ''\nopt_libs = ''\nif int(options['debug']):\n\topt_cflags += '-DDEBUG -ggdb '\n\topt_libs = 'mcheck'\n\topt_ldflags = '-lmcheck'\nif int(options['optimize']):\n\topt_cflags += '-O1'\nif int(options['profile']):\n\topt_ldflags += '-pg'\n\n\nenv_eng = Environment(CPPPATH=['engine','engine/interface','game/interface'],\n CXX='g++',\n\t\t\tCXXFLAGS='-DTGLINUX -I/usr/include/SDL '+opt_cflags,\n\t\t\tLIBS=['dl','SDL','SDL_image','m','GL','GLU','tglib',opt_libs],\n\t\t\tLDFLAGS=opt_ldflags,\n\t\t\tLIBPATH='.')\n\nenv_gam = Environment(CPPPATH=['game','engine/interface','game/interface'],\n CXX='g++',\n\t\t\tCXXFLAGS='-DTGLINUX -Wall '+opt_cflags,\n\t\t\tLDFLAGS='-shared'+opt_ldflags)\n\nenv_tgl = Environment(CPPPATH=tglib_dir.extend('engine/interface'),\n CXX='g++',\n\t\t\tCXXFLAGS='-DTGLINUX -Wall '+opt_cflags,\n\t\t\tLDFLAGS='-shared'+opt_ldflags)\n\n\nenv_tgl.SharedLibrary(target='tglib',source=tglib_files)\nenv_eng.Program(target='tg',source=engine_files)\nenv_gam.SharedLibrary(target='game',source=game_files)\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"242918879","text":"# preprocess\nimport math\nimport os, sys\nimport numpy as np\n\n\nclass Edge(object):\n idx1 = 0\n idx2 = 0\n tri_list = []\n \"\"\"docstring for Edge\"\"\"\n def __init__(self, id1, id2, tid):\n super(Edge, self).__init__()\n self.idx1 = id1 if id1 < id2 else id2\n self.idx2 = id2 if id1 < id2 else id1\n self.tri_list.append(tid)\n\n\nclass Tri(object):\n tri_id = -1\n tri_vert = [] # three id\n tri_edges = [] # three edge objects\n\n \"\"\"docstring for Tri\"\"\"\n def __init__(self, id1, id2, id3, face_idx):\n super(Tri, self).__init__()\n self.tri_id = face_idx\n self.tri_vert = [id1, id2, id3]\n eg1 = Edge(id1, id2, face_idx)\n eg2 = Edge(id2, id3, face_idx)\n eg3 = Edge(id3, id1, face_idx)\n self.tri_edges = [eg1, eg2, eg3]\n\n\n# vert, vel, faces --> list \n# edges(dict): key: vertex-pair(tuple), value: triangle ids sharing this edge\ndef obj_loader(file_name, vert, vel, edges, faces):\n face_idx = 0\n with open(file_name, \"r\") as f1:\n for line in f1:\n s = line.strip().split(' ')\n if s[0] == 'v':\n vert.append([float(x) for x in s[1:]])\n elif s[0] == 'nv':\n vel.extend([float(x) for x in s[1:]])\n elif s[0] == 'f':\n id1 = int(s[1].strip().split('/')[0]) - 1 # index start at 1\n id2 = int(s[2].strip().split('/')[0]) - 1\n id3 = 
int(s[3].strip().split('/')[0]) - 1\n # add to the edge dictionary\n v = sorted([id1, id2, id3])\n if not edges.get((v[0], v[1])):\n edges[(v[0], v[1])] = [face_idx]\n else:\n edges[(v[0], v[1])].append(face_idx)\n if not edges.get((v[1], v[2])):\n edges[(v[1], v[2])] = [face_idx]\n else:\n edges[(v[1], v[2])].append(face_idx)\n if not edges.get((v[0], v[2])):\n edges[(v[0], v[2])] = [face_idx]\n else:\n edges[(v[0], v[2])].append(face_idx)\n # add to face list\n faces[face_idx] = Tri(id1, id2, id3, face_idx)\n face_idx += 1\n \n\ndef is_same_edge(e1, e2):\n if e1.idx1 == e2.idx1 and e1.idx2 == e2.idx2:\n return True\n else:\n return False\n\n\n# find the vertex that an edge is facing in a triangle\n# return vertex index\ndef vert_for_edge(tri, edge):\n vertices = tri.tri_vert\n for v in vertices:\n if edge.idx1 != v and edge.idx2 != v:\n return v\n\n\n# find other two edges in current triangle besides given edge\ndef other_two_edges(tri, e):\n e_list = tri.tri_edges\n other_e = []\n for item in e_list:\n if not is_same_edge(item, e):\n other_e.append(item)\n return other_e\n\n\n# input: vert & faces\n# output tri_nb: local vertices per row * tri_num\n# n: per tri info data, 3 for one triangle position only, 6 for triangle with one layer neighbor, x... with velocity etc...\ndef comp_mtx(vert, edges, faces, n=3):\n vert_num = len(vert)\n tri_num = len(faces)\n dim = [tri_num, vert_num]\n # print dim\n\n # mtx = np.array([np.zeros(tri_num*3) for item in range(vert_num)])\n mtx = np.array([np.zeros(tri_num * n) for item in range(vert_num)])\n count = np.zeros((vert_num, 1))\n # print \">>> mtx shape: \", mtx.shape\n\n # new_edges = []\n for i in range(0, tri_num):\n [id1, id2, id3] = faces[i].tri_vert\n # original vertex in index matrix\n mtx[id1][i * n] = 1\n mtx[id2][i * n + 1] = 1\n mtx[id3][i * n + 2] = 1\n count[id1][0] += 1.0\n count[id2][0] += 1.0\n count[id3][0] += 1.0\n\n # # for the neighbors, get shared edge and corresponding vertex\n # for j in range(0, len(faces[i].tri_edges)):\n # ed = faces[i].tri_edges[j]\n # # retrieve the tri_list for the dictionary\n # shared_tri = edges[(ed.idx1, ed.idx2)]\n # if len(shared_tri) > 1:\n # other_tri = shared_tri[1] if shared_tri[0] == i else shared_tri[0]\n # new_vert_id = vert_for_edge(faces[other_tri], ed)\n # # add to index matrix\n # # mtx[new_vert_id][i * 6 + 3 + j] = 1\n # mtx[new_vert_id][i * n + 3 + j] = 1\n # count[new_vert_id][0] += 1.0\n\n mtx_1 = mtx\n mtx = mtx_1 / count\n\n return dim, mtx, mtx_1\n\n\ndef find_neighbors(vert, edges, faces, n=1):\n vert_num = len(vert)\n tri_num = len(faces)\n tri_nb = [0] * tri_num\n # print(vert_num, tri_num, tri_nb) 700 x 1292\n # new_edges = []\n for i in range(0, tri_num):\n # print \"i:{}\".format(i)\n [id1, id2, id3] = faces[i].tri_vert\n # original vertex position\n tri_nb.extend(vert[id1])\n tri_nb.extend(vert[id2])\n tri_nb.extend(vert[id3])\n # while n > 0:\n # n = n - 1\n # add neighbors\n for j in range(0, len(faces[i].tri_edges)):\n ed = faces[i].tri_edges[j]\n # retrieve the tri_list for the dictionary\n shared_tri = edges[(ed.idx1, ed.idx2)]\n if len(shared_tri) > 1:\n other_tri = shared_tri[1] if shared_tri[0] == i else shared_tri[0]\n new_vert_id = vert_for_edge(faces[other_tri], ed)\n tri_nb.extend(vert[new_vert_id])\n # new_edges.extend(other_two_edges(faces[other_tri], ed))\n else:\n tri_nb.extend([0.0, 0.0, 0.0]) # zero padding\n\n return tri_nb\n\n\n# main\n# input: obj (subdivided coarse mesh)\n# return position matrix: local vertices per row * tri_num (6*3 x 
tri_num)\ndef meshmtx_wnb(file_name):\n vert = []\n vel = []\n edges = {}\n faces = {}\n\n obj_loader(file_name, vert, vel, edges, faces)\n dim, mtx, mtx_1 = comp_mtx(vert, edges, faces)\n\n return dim, mtx, mtx_1\n\n\ndef load_batch(file_name, batch_data, holdings=[]):\n vert = []\n vel = []\n edges = {}\n faces = {}\n obj_loader(file_name, vert, vel, edges, faces)\n\n # tri_nb = find_neighbors(vert, edges, faces)\n # batch_data.append(tri_nb)\n\n if len(holdings) is not 0:\n for i, x in enumerate(holdings):\n del vert[x - i]\n\n batch_data.append(vert)\n\n","sub_path":"smooth_pool_9/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"211947581","text":"from __future__ import print_function # so print doesn't show brackets\n\nimport numpy as np\nimport sys\nimport warnings\nimport copy\n\nimport scipy as sp\nimport qinfer as qi\nimport time\n\nimport qmla.shared_functionality.experimental_data_processing\nimport qmla.get_exploration_strategy\nimport qmla.memory_tests\nimport qmla.shared_functionality.probe_set_generation\nimport qmla.construct_models\nimport qmla.logging \n\nglobal_print_loc = False\nglobal debug_print\ndebug_print = False\nglobal debug_mode\ndebug_mode = True\nglobal debug_print_file_line\ndebug_print_file_line = False\n\n\nclass QInferModelQMLA(qi.FiniteOutcomeModel):\n r\"\"\"\n Interface between QMLA and QInfer.\n\n QInfer is a library for performing Bayesian inference\n on quantum data for parameter estimation.\n It underlies the Quantum Hamiltonian Learning subroutine\n employed within QMLA.\n Bayesian inference relies on comparisons likelihoods\n of the target and candidate system. \n This class, specified by an exploration strategy, defines how to \n compute the likelihood for the user's system. \n Most functionality is inherited from QInfer, but methods listed \n here are edited for QMLA's needs. \n The likelihood function given here should suffice for most QMLA \n implementations, though users may want to overwrite \n get_system_pr0_array and get_simulator_pr0_array, \n for instance to specify which experimental data points to use. \n \n :param str model_name: Unique string representing a model.\n :param np.ndarray modelparams: list of parameters to multiply by operators, \n unused for QMLA reasons but required by QInfer. \n :param np.ndarray oplist: Set of operators whose sum\n defines the evolution Hamiltonian \n (where each operator is associated with a distinct parameter).\n :param np.ndarray true_oplist: list of operators of the target system,\n used to construct true hamiltonian.\n :param np.ndarray trueparams: list of parameters of the target system,\n used to construct true hamiltonian.\n :param int num_probes: number of probes available in the probe sets, \n used to loop through probe set\n :param dict probe_dict: set of probe states to be used during training\n for the system, indexed by (probe_id, num_qubits). \n :param dict sim_probe_dict: set of probe states to be used during training\n for the simulator, indexed by (probe_id, num_qubits). Usually the same as \n the system probes, but not always. 
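# A minimal standalone sketch (not part of the QMLA model file above): the
# probe-dictionary shape the docstring describes, with keys that are
# (probe_id, num_qubits) tuples mapping to normalised state vectors. The
# random probes and sizes here are purely illustrative.
import numpy as np

demo_num_probes, demo_max_qubits = 3, 2
demo_probe_dict = {}
for pid in range(demo_num_probes):
    for nq in range(1, demo_max_qubits + 1):
        v = np.random.randn(2 ** nq) + 1j * np.random.randn(2 ** nq)
        demo_probe_dict[(pid, nq)] = v / np.linalg.norm(v)
print(demo_probe_dict[(0, 2)].shape)  # (4,)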
\n :param str exploration_rule: string corresponding to a unique exploration strategy,\n used to generate an explorationStrategy_ instance.\n :param dict experimental_measurements: fixed measurements of the target system, \n indexed by time.\n :param list experimental_measurement_times: times indexed in experimental_measurements.\n :param str log_file: Path of log file.\n \"\"\"\n\n ## INITIALIZER ##\n\n def __init__(\n self,\n model_name,\n modelparams,\n oplist,\n true_oplist,\n truename,\n true_param_dict,\n trueparams,\n num_probes,\n probe_dict,\n sim_probe_dict,\n exploration_rules,\n experimental_measurements,\n experimental_measurement_times,\n log_file,\n qmla_id=-1, \n evaluation_model=False,\n estimated_params=None,\n comparison_model=False, \n debug_mode=False,\n **kwargs\n ):\n self.model_name = model_name\n self.log_file = log_file\n self.qmla_id = qmla_id\n self.exploration_rules = exploration_rules\n self._oplist = oplist\n self._a = 0\n self._b = 0\n self.probe_counter = 0\n self.probe_rotation_frequency = 10\n self._modelparams = modelparams\n self.signs_of_inital_params = np.sign(modelparams)\n self._true_oplist = true_oplist\n self._trueparams = trueparams\n self._truename = truename\n self._true_dim = qmla.construct_models.get_num_qubits(self._truename)\n self.true_param_dict = true_param_dict \n self.store_likelihoods = {x : {} for x in ['system', 'simulator_median', 'simulator_mean']}\n self.likelihood_calls = {_ : 0 for _ in ['system', 'simulator']}\n self.summarise_likelihoods = {\n x : []\n for x in [\n 'system', \n 'particles_median', 'particles_mean',\n 'particles_std', 'particles_lower_quartile', 'particles_upper_quartile']\n }\n self.store_p0_diffs = []\n self.debug_mode = debug_mode\n # get true_hamiltonian from true_param dict\n self.log_print([\"True params dict:\", self.true_param_dict])\n true_ham = None\n for k in list(self.true_param_dict.keys()):\n param = self.true_param_dict[k]\n mtx = qmla.construct_models.compute(k)\n if true_ham is not None:\n true_ham += param * mtx\n else:\n true_ham = param * mtx\n self.true_hamiltonian = true_ham\n\n self.timings = {\n 'system': {}, \n 'simulator' : {}\n }\n for k in self.timings:\n self.timings[k] = {\n 'expectation_values' : 0, \n 'get_pr0' : 0,\n 'get_probe' : 0, \n 'construct_ham' : 0,\n 'storing_output' : 0,\n 'likelihood_array' : 0,\n 'likelihood' : 0, \n }\n self.calls_to_likelihood = 0 \n self.single_experiment_timings = {\n k : {} for k in ['system', 'simulator']\n }\n try:\n self.exploration_class = qmla.get_exploration_strategy.get_exploration_class(\n exploration_rules=self.exploration_rules,\n log_file=self.log_file,\n qmla_id=self.qmla_id, \n )\n except BaseException:\n self.log_print([\n \"Could not instantiate exploration strategy {}. Terminating\".foramt(\n self.exploration_rules\n )\n ])\n raise\n self.experimental_measurements = experimental_measurements\n self.experimental_measurement_times = experimental_measurement_times\n self.iqle_mode = self.exploration_class.iqle_mode \n self.comparison_model = comparison_model\n self.evaluation_model = evaluation_model\n if self.evaluation_model:\n self.estimated_params = estimated_params\n self.log_print([\n \"Evaluation qinfer model. 
Estimated parameters: {}\".format(\n self.estimated_params\n )\n ])\n estimated_model=None\n for i in range(len(self.estimated_params)):\n p = self.estimated_params[i]\n m = self._oplist[i]\n if estimated_model is None:\n estimated_model = p*m\n else:\n estimated_model += p*m\n self.estimated_model = estimated_model\n try:\n self.log_print([\n \"Estimated model's difference from true model\", \n np.max(np.abs(self.estimated_model - self.true_hamiltonian))\n ])\n except:\n # different dimension candidate from true model; doesn't really matter\n pass\n\n\n # Required by QInfer: \n self._min_freq = 0 # what does this do?\n self._solver = 'scipy'\n # This is the solver used for time evolution scipy is faster\n # QuTip can handle implicit time dependent likelihoods\n\n # self.model_dimension = qmla.construct_models.get_num_qubits(self.model_name)\n self.model_dimension = int(np.log2(self._oplist[0].shape[0]))\n self._true_dim = int(np.log2(self.true_hamiltonian.shape[0]))\n self.log_print([\"\\nModel {} dimension: {}. \".format(\n self.model_name, self.model_dimension\n )])\n if true_oplist is not None and trueparams is None:\n raise(\n ValueError(\n '\\nA system Hamiltonian with unknown \\\n parameters was requested'\n )\n )\n super(QInferModelQMLA, self).__init__(self._oplist)\n # self.log_print_debug([\n # \"true ops:\\n\", self._true_oplist,\n # \"\\nsim ops:\\n\", self._oplist\n # ])\n\n try:\n self.probe_dict = probe_dict\n self.sim_probe_dict = sim_probe_dict\n self.probe_number = num_probes\n except:\n raise ValueError(\n \"Probe dictionaries not passed to Qinfer model\"\n )\n self.log_print_debug([\n \"_trueparams:\", self._trueparams\n ])\n\n\n def log_print(\n self, \n to_print_list, \n log_identifier=None\n ):\n r\"\"\"Writng to unique QMLA instance log.\"\"\"\n if log_identifier is None: \n log_identifier = 'QInfer interface {}'.format(self.model_name)\n\n qmla.logging.print_to_log(\n to_print_list = to_print_list, \n log_file = self.log_file, \n log_identifier = log_identifier\n )\n\n def log_print_debug(\n self, \n to_print_list\n ):\n r\"\"\"Log print if global debug_mode set to True.\"\"\"\n\n if self.debug_mode:\n self.log_print(\n to_print_list = to_print_list,\n log_identifier = 'QInfer interface debug'\n )\n\n ## PROPERTIES ##\n @property\n def n_modelparams(self):\n r\"\"\"\n Number of parameters in the specific model \n typically, in QMLA, we have one parameter per model.\n \"\"\"\n\n return len(self._oplist)\n\n @property\n def modelparam_names(self):\n r\"\"\"\n Returns the names of the various model parameters admitted by this\n model, formatted as LaTeX strings. (Inherited from Qinfer)\n \"\"\"\n try:\n individual_term_names = self.model_name.split('+')\n except:\n individual_term_names = ['w0']\n for modpar in range(self.n_modelparams - 1):\n individual_term_names.append('w' + str(modpar + 1))\n \n return individual_term_names\n\n\n @property\n def expparams_dtype(self):\n r\"\"\"\n Returns the dtype of an experiment parameter array. \n \n For a model with single-parameter control, this will likely be a scalar dtype,\n such as ``\"float64\"``. 
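# A minimal standalone sketch (not part of the QMLA model file): building a
# structured NumPy array matching the expparams_dtype convention described
# above. The two trailing model-term names are hypothetical placeholders.
import numpy as np

demo_dtype = [('t', 'float'), ('probe_id', 'int'),
              ('termA', 'float'), ('termB', 'float')]
demo_exp = np.zeros((1,), dtype=demo_dtype)
demo_exp['t'] = 0.5
demo_exp['probe_id'] = 2
print(demo_exp['t'], demo_exp['probe_id'])  # [0.5] [2]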
More generally, this can be an example of a\n record type, such as ``[('time', py.'float64'), ('axis', 'uint8')]``.\n This property is assumed by inference engines to be constant for\n the lifetime of a Model instance.\n In the context of QMLA the expparams_dtype are assumed to be a list of tuple where\n the first element of the tuple identifies the parameters (including type) while the second element is\n the actual type of of the parameter, typicaly a float.\n (Modified from Qinfer).\n \"\"\"\n\n # expparams are the {t, probe_id, w1, w2, ...} guessed parameters, i.e. each \n # particle has a specific sampled value of the corresponding\n # parameter\n \n expnames = [\n ('t', 'float'),\n ('probe_id', 'int')\n ]\n try:\n individual_model_terms = self.model_name.split('+')\n except:\n individual_model_terms = [\n 'w_{}'.format(i)\n for i in range(self.n_modelparams)\n ]\n for term in individual_model_terms:\n expnames.append( (term, 'float') )\n\n return expnames\n\n ################################################################################\n # Methods\n ################################################################################\n\n def are_models_valid(self, modelparams):\n r\"\"\"\n Checks that the proposed models are valid.\n\n Before setting new distribution after resampling,\n checks that all parameters have same sign as the\n initial given parameter for that term.\n Otherwise, redraws the distribution.\n Modified from qinfer.\n \"\"\"\n\n same_sign_as_initial = False\n if same_sign_as_initial == True:\n new_signs = np.sign(modelparams)\n validity_by_signs = np.all(\n np.sign(modelparams) == self.signs_of_inital_params,\n axis=1\n )\n return validity_by_signs\n else:\n validity = np.all(np.abs(modelparams) > self._min_freq, axis=1)\n return validity\n\n def n_outcomes(self, expparams):\n r\"\"\"\n Returns an array of dtype ``uint`` describing the number of outcomes\n for each experiment specified by ``expparams``.\n\n :param numpy.ndarray expparams: Array of experimental parameters. This\n array must be of dtype agreeing with the ``expparams_dtype``\n property.\n \"\"\"\n return 2\n\n def likelihood(\n self,\n outcomes,\n modelparams,\n expparams\n ):\n r\"\"\"\n Function to calculate likelihoods for all the particles\n \n Inherited from Qinfer:\n Calculates the probability of each given outcome, conditioned on each\n given model parameter vector and each given experimental control setting.\n\n QMLA modifications: \n Given a list of experiments to perform, expparams, \n extract the time list. Typically we use a single experiment\n (therefore single time) per update.\n QInfer passes particles as modelparams.\n QMLA updates its knowledge in two steps:\n * \"simulate\" an experiment (which can include outsourcing from here to perform a real experiment), \n * update parameter distribution by comparing Np particles to the experimental result\n \n It is important that the comparison is fair, meaning:\n * The evolution time must be the same\n * The probe state to evolve must be the same.\n\n To simulate the experiment, we call QInfer's simulate_experiment,\n which calls likelihood(), passing a single particle. \n The update function calls simulate_experiment with Np particles. \n Therefore we know, when a single particle is passed to likelihood, \n that we want to call the true system (we know the true parameters \n and operators by the constructor of this class). \n So, when a single particle is detected, we circumvent QInfer by triggering\n get_system_pr0_array. 
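# A minimal standalone sketch (not part of the QMLA model file): a simplified
# stand-in for QInfer's pr0_to_likelihood_array used at the end of
# likelihood(). For a two-outcome experiment, outcome 0 is assigned
# probability pr0 and outcome 1 gets 1 - pr0. This is an illustrative
# re-implementation, not QInfer's exact code.
import numpy as np

def demo_pr0_to_likelihood(outcomes, pr0):
    pr0 = pr0[np.newaxis, :, :]               # add the outcome axis in front
    return np.concatenate([pr0 if o == 0 else 1.0 - pr0 for o in outcomes])

demo_pr0 = np.array([[0.9], [0.4]])           # 2 particles, 1 experiment
print(demo_pr0_to_likelihood(np.array([0, 1]), demo_pr0))  # shape (2, 2, 1)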
Users can overwrite this function as desired; \n by default it computes true_hamiltonian, \n and computes the likelhood for the given time. \n When >1 particles are detected, pr0 is computed by constructing Np \n candidate Hamiltonians, each corresponding to a single particle, \n where particles are chosen by Qinfer and given as modelparams.\n This is done through get_simulator_pr0_array.\n We know calls to likelihood are coupled: \n one call for the system, and one for the update, \n which must use the same probes. Therefore probes are indexed\n by a probe_id as well as their dimension. \n We track calls to likelihood() in _a and increment the probe_id\n to pull every second call, to ensure the same probe_id is used for \n system and simulator.\n\n :param np.ndarray outcomes: outcomes of the experiments\n :param np.ndarray modelparams: \n values of the model parameters particles \n A shape ``(n_particles, n_modelparams)``\n array of model parameter vectors describing the hypotheses for\n which the likelihood function is to be calculated.\n \n :param np.ndarray expparams: \n experimental parameters, \n A shape ``(n_experiments, )`` array of\n experimental control settings, with ``dtype`` given by \n :attr:`~qinfer.Simulatable.expparams_dtype`, describing the\n experiments from which the given outcomes were drawn.\n \n :rtype: np.ndarray\n :return: A three-index tensor ``L[i, j, k]``, where ``i`` is the outcome\n being considered, ``j`` indexes which vector of model parameters was used,\n and where ``k`` indexes which experimental parameters where used.\n Each element ``L[i, j, k]`` then corresponds to the likelihood\n :math:`\\Pr(d_i | \\vec{x}_j; e_k)`.\n \"\"\"\n\n self.calls_to_likelihood+=1\n t_likelihood_start = time.time()\n super(QInferModelQMLA, self).likelihood(\n outcomes, modelparams, expparams\n ) # just adds to self._call_count (Qinfer abstact model class)\n\n # process expparams\n times = expparams['t'] # times to compute likelihood for. typicall only per experiment. \n probe_id = expparams['probe_id'][0]\n expparams_sampled_particle = np.array(\n [expparams.item(0)[2:]]) # TODO THIS IS DANGEROUS - DONT DO IT OUTSIDE OF TESTS \n self.log_print_debug([\n \"expparams_sampled_particle:\", expparams_sampled_particle\n ])\n self.ham_from_expparams = np.tensordot(\n expparams_sampled_particle, \n self._oplist, \n axes=1 \n )[0]\n\n num_particles = modelparams.shape[0]\n num_parameters = modelparams.shape[1]\n\n # assumption is that calls to likelihood are paired: \n # one for system, one for simulator\n # therefore the same probe should be assumed for consecutive calls\n # probe id is tracked with _a and _b.\n # i.e. increments each 2nd call, loops back when probe dict exhausted\n\n if num_particles == 1:\n # TODO better mechanism to determine if self.true_evolution, \n # rather than assuming 1 particle => system\n # call the system, use the true paramaters as a single particle, \n # to get the true evolution\n self.true_evolution = True\n params = [copy.deepcopy(self._trueparams)]\n else:\n self.true_evolution = False\n params = modelparams\n\n self.probe_counter = probe_id\n\n self.log_print_debug([\n \"\\n\\nLikelihood fnc called. Probe counter={}. 
True system -> {}.\".format(self.probe_counter, self.true_evolution)\n ])\n\n try:\n if self.true_evolution:\n t_init = time.time()\n # self.log_print([\"Getting system pr0\"])\n self.log_print_debug([\n \"Getting system Pr0 w/ params \", params\n ])\n pr0 = self.get_system_pr0_array(\n times=times,\n particles=params,\n )\n timing_marker = 'system'\n self.timings[timing_marker]['get_pr0'] += time.time() - t_init\n else:\n t_init = time.time()\n # self.log_print([\"Getting simulator pr0\"])\n self.log_print_debug([\n \"Getting simulator Pr0 w/ params \", params\n ])\n pr0 = self.get_simulator_pr0_array(\n times=times,\n particles=params,\n ) \n timing_marker = 'simulator'\n self.timings[timing_marker]['get_pr0'] += time.time() - t_init\n except:\n self.log_print([\n \"Failed to compute pr0. probe id used: {}\".format(self.probe_counter)\n ])\n # self.log_print([\"H_ for IQLE:\", self.ham_from_expparams[0]])\n raise # TODO raise specific error\n sys.exit()\n t_init = time.time()\n likelihood_array = (\n qi.FiniteOutcomeModel.pr0_to_likelihood_array(\n outcomes, pr0\n )\n )\n self.timings[timing_marker]['likelihood_array'] += time.time() - t_init\n self.single_experiment_timings[timing_marker]['likelihood'] = time.time() - t_likelihood_start\n\n self.log_print_debug([\n '\\ntrue_evo:', self.true_evolution,\n '\\nevolution times:', times,\n '\\nlen(outcomes):', len(outcomes),\n '\\n_a = {}, _b={}'.format(self._a, self._b),\n '\\nprobe counter:', self.probe_counter,\n '\\nexp:', expparams,\n '\\nOutcomes:', outcomes[:3],\n '\\nparticles:', params[:3],\n \"\\nPr0: \", pr0[:3], \n \"\\nLikelihood: \", likelihood_array[0][:3],\n \"\\nexpparams_sampled_particle:\", expparams_sampled_particle\n ])\n \n self.timings[timing_marker]['likelihood'] += time.time() - t_likelihood_start\n\n t_storage_start = time.time()\n if self.true_evolution: \n self.log_print_debug([\"Storing system likelihoods\"])\n self.store_likelihoods['system'][self.likelihood_calls['system']] = pr0\n self.summarise_likelihoods['system'].append(np.median(pr0))\n self.likelihood_calls['system'] += 1 \n else:\n self.store_likelihoods['simulator_mean'][self.likelihood_calls['simulator']] = np.mean(pr0)\n self.store_likelihoods['simulator_median'][self.likelihood_calls['simulator']] = np.median(pr0)\n diff_p0 = np.abs( pr0 - self.store_likelihoods['system'][self.likelihood_calls['simulator']] )\n self.store_p0_diffs.append( [np.median(diff_p0), np.std(diff_p0)] )\n self.summarise_likelihoods['particles_mean'].append( np.median(pr0) )\n self.summarise_likelihoods['particles_median'].append( np.median(pr0) )\n self.summarise_likelihoods['particles_std'].append( np.std(pr0) )\n self.summarise_likelihoods['particles_lower_quartile'].append( np.percentile(pr0, 25) )\n self.summarise_likelihoods['particles_upper_quartile'].append( np.percentile(pr0, 75) )\n self.likelihood_calls['simulator'] += 1 \n self.single_experiment_timings[timing_marker]['storage'] = time.time() - t_storage_start\n self.log_print_debug([\n \"Setting single_experiment_timings for {}[{}] -> {}\".format(\n timing_marker, 'storage', time.time() - t_storage_start\n )\n ])\n\n self.log_print_debug([\"Stored likelihoods\"])\n\n if self.evaluation_model:\n self.log_print_debug([\n \"\\nSystem evolution {}. t={} Likelihood={}\".format(\n self.true_evolution, times[0], likelihood_array[:3]\n )])\n \n return likelihood_array\n\n def get_system_pr0_array(\n self, \n times,\n particles, \n # **kwargs\n ):\n r\"\"\"\n Compute pr0 array for the system. 
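\n\n For orientation: pr0 holds, per particle and per time, the probability of\n measuring outcome 0, and the caller stacks it into the two-outcome likelihood\n tensor described in likelihood() above. Conceptually, as a minimal sketch\n independent of QInfer's own helper:\n\n pr0 = np.array([[0.9], [0.4]]) # shape (n_particles, n_times)\n L = np.stack([pr0, 1.0 - pr0]) # L[0] = Pr(outcome 0), L[1] = Pr(outcome 1)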
\n # TODO compute e^(-iH) once for true Hamiltonian and use that rather than computing every step. \n\n For user specific data, or method to compute system data, replace this function \n in exploration_strategy.qinfer_model_subroutine. \n Here we pass the true operator list and true parameters to \n default_pr0_from_modelparams_times_.\n\n :param list times: times to compute pr0 for; usually single element.\n :param np.ndarry particles: list of parameter-lists, used to construct\n Hamiltonians. In this case, there should be a single particle\n corresponding to the true parameters. \n \n :returns np.ndarray pr0: probabilities of measuring specified outcome\n \"\"\"\n timing_marker = 'system'\n\n operator_list = self._true_oplist\n ham_num_qubits = self._true_dim\n # format of probe dict keys: (probe_id, qubit_number)\n # probe_counter controlled in likelihood method\n # probe = self.get_probe(\n # probe_id = self.probe_counter, \n # probe_set = \"system\"\n # )\n probe = self.probe_dict[\n self.probe_counter,\n self._true_dim \n ]\n # self.log_print([\n # \"\\nTrue Model {} has dim {} (operator shape {}) using system probe dimension: {}\".format(\n # self._truename, self._true_dim, np.shape(operator_list[0]), probe.shape),\n # # \"\\nTrue Model {} has shape {} with dimension {}\".format(self._truename, np.shape(operator_list[0]), self._true_dim)\n # ])\n\n # TODO: could just work with true_hamiltonian, worked out on __init__\n return self.default_pr0_from_modelparams_times(\n t_list = times,\n particles = particles, \n oplist = operator_list, \n # hamiltonian=self.true_hamiltonian, \n probe = probe, \n timing_marker=timing_marker\n # **kwargs\n )\n\n def get_simulator_pr0_array(\n self, \n particles, \n times,\n # **kwargs\n ):\n r\"\"\"\n Compute pr0 array for the simulator. \n\n For user specific data, or method to compute simulator data, replace this function \n in exploration_strategy.qinfer_model_subroutine. \n Here we pass the candidate model's operators and particles\n to default_pr0_from_modelparams_times_.\n\n :param list times: times to compute pr0 for; usually single element.\n :param np.ndarry particles: list of particles (parameter-lists), used to construct\n Hamiltonians. \n \n :returns np.ndarray pr0: probabilities of measuring specified outcome\n \"\"\"\n timing_marker = 'simulator'\n ham_num_qubits = self.model_dimension\n # format of probe dict keys: (probe_id, qubit_number)\n # probe_counter controlled in likelihood method\n t_init = time.time()\n\n probe = self.sim_probe_dict[\n self.probe_counter,\n self.model_dimension\n ]\n\n self.timings[timing_marker]['get_probe'] += time.time() - t_init\n operator_list = self._oplist\n if self.evaluation_model:\n # self.log_print_debug([\n self.log_print_debug([\n \"\\nUsing precomputed Hamiltonian. 
probe[0] (ID {}):\\n{}\".format(\n self.probe_counter, \n probe[0]\n )\n ])\n hamiltonian = self.estimated_model\n else:\n hamiltonian = None\n \n t_init = time.time()\n pr0 = self.default_pr0_from_modelparams_times(\n t_list = times, \n particles = particles, \n oplist = operator_list, \n probe = probe, \n hamiltonian=hamiltonian,\n timing_marker=timing_marker\n # **kwargs\n )\n return pr0\n\n def default_pr0_from_modelparams_times(\n self,\n t_list,\n particles,\n oplist,\n probe,\n timing_marker,\n hamiltonian=None,\n **kwargs\n ):\n r\"\"\"\n Compute probabilities of available outputs as an array.\n\n :param np.ndarray t_list: \n List of times on which to perform experiments\n :param np.ndarray particles: \n values of the model parameters particles \n A shape ``(n_particles, n_modelparams)``\n array of model parameter vectors describing the hypotheses for\n which the likelihood function is to be calculated.\n :param list oplist:\n list of the operators defining the model\n :param np.ndarray probe: quantum state to evolve\n\n :returns np.ndarray pr0: list of probabilities (one for each particle).\n The calculation, meaning and interpretation of these probabilities \n depends on the user defined ExplorationStrategy.expectation_value function. \n By default, it is the expecation value:\n | < probe.transpose | e^{-iHt} | probe > |**2,\n but can be replaced in the ExplorationStrategy_. \n \"\"\"\n\n from rq import timeouts\n if np.shape(probe)[0] < 4 : \n probe_to_print = probe\n else:\n probe_to_print = probe[0]\n\n self.log_print_debug([\n \"Getting pr0; true system ->\", self.true_evolution, \n \"\\n(part of) Probe (dimension {}): \\n {}\".format(\n np.shape(probe),\n probe_to_print,\n ),\n \"\\nTimes: \", t_list\n ])\n\n # if hamiltonian is not None: \n # self.log_print([\n # \"Hamiltonian passed:\\n\", hamiltonian\n # ])\n\n num_particles = len(particles)\n num_times = len(t_list)\n output = np.empty([num_particles, num_times])\n\n for evoId in range(num_particles): \n try:\n t_init = time.time()\n if hamiltonian is None:\n ham = np.tensordot(\n particles[evoId], oplist, axes=1\n )\n else: \n ham = hamiltonian\n\n if self.iqle_mode and self.true_evolution:\n # H to compute for IQLE on the system\n ham = self.true_hamiltonian - self.ham_from_expparams\n elif self.iqle_mode and not self.true_evolution:\n # H to compute for IQLE on the simulator\n ham = ham - self.ham_from_expparams\n if np.any(np.isnan(ham)):\n self.log_print([\"NaN detected in Hamiltonian. 
Ham from expparams:\", self.ham_from_expparams])\n\n self.timings[timing_marker]['construct_ham'] += time.time()-t_init\n except BaseException:\n self.log_print(\n [\n \"Failed to build Hamiltonian.\",\n \"\\nparticles:\", particles[evoId],\n \"\\noplist:\", oplist\n ],\n )\n raise\n # if evoId == 0:\n # self.log_print_debug([\n # \"\\nHamiltonian:\\n\", ham,\n # \"\\ntimes:\", t_list,\n # \"\\nH from expparams:\", self.ham_from_expparams\n # ])\n\n for tId in range(len(t_list)):\n\n t = t_list[tId]\n if t > 1e6: # Try limiting times to use to 1 million\n import random\n # random large number but still computable without error\n t = random.randint(1e6, 3e6)\n try:\n t_init = time.time()\n prob_meas_input_state = self.exploration_class.get_expectation_value(\n ham=ham,\n t=t,\n state=probe,\n log_file=self.log_file,\n log_identifier='get pr0 call exp val'\n )\n self.timings[timing_marker]['expectation_values'] += time.time() - t_init\n t_init = time.time()\n output[evoId][tId] = prob_meas_input_state\n self.timings[timing_marker]['storing_output'] += time.time() - t_init\n\n except NameError:\n self.log_print([\n \"Error raised; unphysical expecation value.\",\n \"\\nParticle:\\n\", particles[evoId],\n \"\\nt=\", t,\n ])\n sys.exit()\n except timeouts.JobTimeoutException:\n self.log_print([\n \"RQ Time exception.\",\n \"\\nParticle:\\n\", particles[evoId],\n \"\\nt=\", t,\n ])\n sys.exit()\n\n if output[evoId][tId] < 0:\n print(\"NEGATIVE PROB\")\n self.log_print([\n \"Negative probability : \\\n \\n probability = \",\n output[evoId][tId],\n \"\\nat t=\", t_list\n ])\n elif output[evoId][tId] > 1.001:\n self.log_print(\n [\n \"[QLE] Probability > 1: \\\n \\t \\t probability = \",\n output[evoId][tId]\n ]\n )\n return output\n\n\nclass QInferNVCentreExperiment(QInferModelQMLA):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def get_system_pr0_array(\n self, \n times,\n particles, \n **kwargs\n ):\n self.log_print_debug([\"Getting pr0 from experimental dataset.\"])\n # time = expparams['t']\n if len(times) > 1:\n self.log_print(\"Multiple times given to experimental true evolution:\", times)\n sys.exit()\n\n time = times[0]\n \n try:\n # If time already exists in experimental data\n experimental_expec_value = self.experimental_measurements[time]\n except BaseException:\n # map to nearest experimental time\n try:\n experimental_expec_value = qmla.shared_functionality.experimental_data_processing.nearest_experimental_expect_val_available(\n times=self.experimental_measurement_times,\n experimental_data=self.experimental_measurements,\n t=time\n )\n except:\n self.log_print_debug([\n \"Failed to get experimental data point\"\n ])\n raise\n self.log_print_debug([\n \"experimental value for t={}: {}\".format(\n time, \n experimental_expec_value\n )\n ])\n self.log_print_debug([\n \"Using experimental time\", time,\n \"\\texp val:\", experimental_expec_value\n ])\n pr0 = np.array([[experimental_expec_value]])\n self.log_print_debug([\n \"pr0 for system:\", pr0\n ])\n return pr0\n\n def get_simulator_pr0_array(\n self, \n particles, \n times,\n # **kwargs\n ):\n # map times to experimentally available times\n mapped_times = [\n qmla.shared_functionality.experimental_data_processing.nearest_experimental_time_available(\n times = self.experimental_measurement_times,\n t = t\n )\n for t in times\n ]\n return super().get_simulator_pr0_array(\n particles, \n mapped_times\n )\n\n\nclass QInferInterfaceJordanWigner(QInferModelQMLA):\n r\"\"\"\n For use when models are implemented via 
Jordan Wigner transformation, \n since this invokes 2 qubits per site in the system. \n Therefore, everything remains as in other models, \n except that probe selection should use the appropriate probe id, \n but with twice the number of qubits specified by the model. \n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def get_probe(\n self, \n probe_id, \n probe_set\n ):\n self.log_print([\n \"Using JW get_probe\"\n ])\n if probe_set == 'simulator':\n probe = self.sim_probe_dict[\n probe_id,\n 2*self.model_dimension ]\n return probe\n\n elif probe_set == 'system': \n # get dimension directly from true model since this can be generated by another ES \n # and therefore does not require the 2-qubit-per-site overhead of Jordan Wigner.\n dimension = np.log2(np.shape(self.true_hamiltonian)[0])\n probe = self.probe_dict[\n probe_id,\n self._true_dim\n ]\n return probe\n else:\n self.log_print([\n \"get_probe must either act on simulator or system, received {}\".format(probe_set)\n ])\n raise ValueError(\n \"get_probe must either act on simulator or system, received {}\".format(probe_set)\n )\n\n\nclass QInferInterfaceAnalytical(QInferModelQMLA):\n r\"\"\"\n Analytically computes the likelihood for an exemplary case. \n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def get_system_pr0_array(\n self, \n times,\n particles, \n ):\n\n pr0 = np.empty([len(particles), len(times)])\n t = times[0]\n self.log_print_debug([\n \"(sys) particles:\", particles,\n \"time: \", t,\n \"\\n shapes: prt={} \\t times={}\".format(np.shape(particles), np.shape(times))\n ])\n\n for evoId in range(len(particles)):\n particle = particles[evoId][0]\n for t_id in range(len(times)):\n pr0[evoId][t_id] = (np.cos(particle * t / 2))**2\n\n return pr0\n\n def get_simulator_pr0_array(\n self, \n particles, \n times,\n # **kwargs\n ):\n pr0 = np.empty([len(particles), len(times)])\n t = times[0]\n self.log_print_debug([\n \"(sim) particles:\", particles,\n \"time: \", t,\n \"\\n shapes: prt={} \\t times={}\".format(np.shape(particles), np.shape(times))\n ])\n\n for evoId in range(len(particles)):\n particle = particles[evoId] \n for t_id in range(len(times)):\n pr0[evoId][t_id] = (np.cos(particle * t / 2))**2\n\n return pr0\n\n","sub_path":"qmla/shared_functionality/qinfer_model_interface.py","file_name":"qinfer_model_interface.py","file_ext":"py","file_size_in_byte":37564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"74152938","text":"\"\"\"\nA non-empty array A consisting of N integers is given. A pair of integers (P, Q), such that 0 ≤ P < Q < N, is called a slice of array A (notice that the slice contains at least two elements). The average of a slice (P, Q) is the sum of A[P] + A[P + 1] + ... + A[Q] divided by the length of the slice. To be precise, the average equals (A[P] + A[P + 1] + ... + A[Q]) / (Q − P + 1).\n\nFor example, array A such that:\n\n A[0] = 4\n A[1] = 2\n A[2] = 2\n A[3] = 5\n A[4] = 1\n A[5] = 5\n A[6] = 8\ncontains the following example slices:\n\nslice (1, 2), whose average is (2 + 2) / 2 = 2;\nslice (3, 4), whose average is (5 + 1) / 2 = 3;\nslice (1, 4), whose average is (2 + 2 + 5 + 1) / 4 = 2.5.\nThe goal is to find the starting position of a slice whose average is minimal.\n\nWrite a function:\n\ndef solution(A)\n\nthat, given a non-empty array A consisting of N integers, returns the starting position of the slice with the minimal average. 
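\n\nAs an aside, a linear-time approach follows from one observation: if a slice of length 4 or more attains the minimal average, its leading pair must attain it too (otherwise the remaining elements would average strictly less), so only pairs and triples ever need checking. A minimal sketch of that idea (solution_linear is an assumed name, not part of the original task):\n\ndef solution_linear(A):\n    best_pos, best_avg = 0, (A[0] + A[1]) / 2\n    for i in range(len(A) - 1):\n        avg2 = (A[i] + A[i + 1]) / 2\n        if avg2 < best_avg:\n            best_pos, best_avg = i, avg2\n        if i < len(A) - 2:\n            avg3 = (A[i] + A[i + 1] + A[i + 2]) / 3\n            if avg3 < best_avg:\n                best_pos, best_avg = i, avg3\n    return best_pos\n\n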
If there is more than one slice with a minimal average, you should return the smallest starting position of such a slice.\n\nFor example, given array A such that:\n\n A[0] = 4\n A[1] = 2\n A[2] = 2\n A[3] = 5\n A[4] = 1\n A[5] = 5\n A[6] = 8\nthe function should return 1, as explained above.\n\nWrite an efficient algorithm for the following assumptions:\n\nN is an integer within the range [2..100,000];\neach element of array A is an integer within the range [−10,000..10,000].\nCopyright 2009–2021 by Codility Limited. All Rights Reserved. Unauthorized copying, publication or disclosure prohibited.\n\"\"\"\n\n# you can write to stdout for debugging purposes, e.g.\n# print(\"this is a debug message\")\n\ndef solution(A):\n presum = [[0]*len(A) for i in range(len(A))]\n min_avg = 10001\n start_pos = 0\n\n for i in range(len(A)):\n presum[i][i] = A[i]\n \n for k in range(1, 3):\n for j in range(0, len(A)-k):\n presum[j][j+k] = presum[j][j+k-1] + A[j+k]\n avg = presum[j][j+k] / (k+1)\n if avg < min_avg:\n start_pos = j\n min_avg = avg\n\n return start_pos\n\n# O(N ** 2) timeout\n# https://app.codility.com/demo/results/trainingB6KF4H-P9B/\n","sub_path":"codility/lessons/5.MinAvgTwoSlice O(N**2).py","file_name":"5.MinAvgTwoSlice O(N**2).py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"422628141","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\nimport http.client\nimport os\nimport random\nimport shutil\nfrom selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nimport time\n\nfrom clients.PrototypeClient import PrototypeClient\n\n\nclass WebClient(PrototypeClient.PrototypeClient):\n def __init__(self, **kwargs):\n super(WebClient, self).__init__(**kwargs)\n self._logger.debug(\"WebClient.__init__()\")\n\n self._browser = None\n\n self._default_config.update({\n \"webdriver\": \"Firefox\",\n \"url_pool\": [\n \"https://google.de\",\n \"https://de.wikipedia.org/wiki/Wikipedia:Hauptseite\",\n \"https://heise.de\"\n ]\n })\n\n def prepare(self):\n self._logger.debug(\"WebClient.prepare()\")\n self.register_application()\n\n def run(self):\n self._logger.debug(\"WebClient.run()\")\n self._running = True\n\n if not self._browser:\n # no browser opened yet - load default config\n self._logger.info(\"No config applied yet - using default config\")\n self.set_config(config={})\n\n self._random_wait_start()\n\n while self._running:\n if not self._do_request():\n time.sleep(0.1)\n else:\n self._logger.debug(\"WebClient running\")\n\n # get next url\n url_pool = self._config[\"url_pool\"]\n url = url_pool[random.randint(0, len(url_pool) - 1)]\n\n # check url http/https otherwise add\n if not (url.startswith(\"http://\") or url.startswith(\"https://\")):\n url = \"http://\" + url\n\n # try except because ctrl+c in the right moment raises this exception - make sure it isn't raised unter other conditions\n try:\n # call a new site or do some interaction\n self.load_url(url=url)\n self._report_metric()\n except http.client.RemoteDisconnected as e:\n self._logger.warn(\"Caught Exception http.client.RemoteDisconnected: %s\" % (e))\n except Exception as e:\n self._logger.error(\"Browser get() exception %s\" % (e))\n\n self._request_finished()\n\n def clean_up(self):\n self._logger.debug(\"WebClient.clean_up()\")\n self.unregister_application()\n if self._browser:\n # TODO investigate difference between close() and quit() when dealing with multiple browser windows\n 
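# Note (standard Selenium behaviour rather than anything project-specific):\n # close() only closes the current window, while quit() ends the WebDriver\n # session and closes all of its windows, so quit() is the right call for\n # final cleanup even when several windows are open.\n 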
self._browser.quit()\n\n def _apply_config(self):\n self._logger.debug(\"WebClient._apply_config()\")\n\n if not self._browser:\n if self._config[\"webdriver\"] == \"Chrome\":\n chrome_options = webdriver.ChromeOptions()\n # chrome_options.add_argument(\"--incognito\")\n self._browser = webdriver.Chrome(chrome_options=chrome_options)\n elif self._config[\"webdriver\"] == \"Firefox\":\n firefox_options = webdriver.firefox.options.Options()\n firefox_options.add_argument(\"--private-window\")\n firefox_options.add_argument(\"--headless\")\n\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference(\"browser.cache.disk.enable\", False)\n firefox_profile.set_preference(\"browser.cache.memory.enable\", False)\n firefox_profile.set_preference(\"browser.cache.offline.enable\", False)\n firefox_profile.set_preference(\"network.http.use-cache\", False)\n firefox_profile.set_preference(\"media.gmp-provider.enabled\", False) # Disable Cisco OpenH264 codec download\n\n # add extension manually because selenium can't handle the new extension format\n xpi_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"extensions/firefox/cache_cleanup/cache_cleanup-1.0-an+fx.xpi\")\n addon_id = \"{27cf6b57-3c62-4ce1-89bd-2ea90d9d7457}\"\n extensions_path = os.path.join(firefox_profile.profile_dir, \"extensions\")\n addon_path = os.path.join(extensions_path, addon_id + \".xpi\")\n if not os.path.exists(extensions_path):\n os.makedirs(extensions_path)\n shutil.copy(xpi_path, addon_path)\n\n log_path = os.path.join(self._log_dir, \"geckodriver.log\")\n\n self._browser = webdriver.Firefox(firefox_profile=firefox_profile, firefox_options=firefox_options, log_path=log_path)\n elif self._config[\"webdriver\"] == \"PhantomJS\":\n self._browser = webdriver.PhantomJS()\n else:\n self._logger.error(\"Unrecognized webdriver %s\" % (self._config[\"webdriver\"]))\n pass\n\n # TODO\n # check if config specifies a other browser and change to specified\n\n def load_url(self, url):\n self._logger.debug(\"WebClient.load_url()\")\n if self._browser:\n self._logger.debug(\"getting %s\" % (url))\n\n # if CTRL+C was pressed get() raises \"BadStatusLine: ''\" exception while kill -SIGINT is ok\n try:\n self._browser.get(url)\n except Exception as e:\n raise e\n\n self.calculate_web_metric()\n\n def calculate_web_metric(self):\n self._logger.debug(\"WebClient.calculate_web_metric()\")\n metric = {\"timing\": self.get_timing(), \"url\": self._browser.current_url}\n self._create_metric(metric=metric)\n\n def get_timing(self):\n self._logger.debug(\"WebClient.get_timing()\")\n timing = self._browser.execute_script(\"return performance.timing;\")\n # delete method\n if \"toJSON\" in timing:\n del(timing[\"toJSON\"])\n # convert timings\n # TODO fallback if navigationStart key isn't there\n ref = timing[\"navigationStart\"]\n for key in timing:\n if timing[key] != 0:\n timing[key] -= ref\n\n try:\n navigation_timing = self._browser.execute_script(\"return performance.getEntriesByType('navigation');\")\n resource_timing = self._browser.execute_script(\"return performance.getEntriesByType('resource');\")\n except WebDriverException as e:\n navigation_timing = []\n resource_timing = []\n\n for entry in navigation_timing + resource_timing:\n if \"toJSON\" in entry:\n del(entry[\"toJSON\"])\n\n return {\"timing\": timing, \"navigation_timing\": navigation_timing, \"resource_timing\": resource_timing}\n\n # TODO\n # add interaction methods like clicking and text entering\n\n\nif __name__ == '__main__':\n\n from 
clients.StandaloneClient import StandaloneClient\n\n # start client\n client = StandaloneClient.StandaloneClient(client_class=WebClient)\n client.run()\n","sub_path":"clients/clients/WebClient/WebClient.py","file_name":"WebClient.py","file_ext":"py","file_size_in_byte":6900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"319552601","text":"# importing required module\r\nfrom playsound import playsound\r\nfrom tkinter import *\r\n\r\nroot = Tk()\r\nroot.title('GeeksforGeeks sound player') # giving the title for our window\r\nroot.geometry(\"500x400\")\r\n\r\n\r\n# making function\r\ndef play():\r\n playsound('sounds/placing.mp3')\r\n\r\n\r\n# title on the screen you can modify it\r\ntitle = Label(root, text=\"GeeksforGeeks\", bd=9, relief=GROOVE,\r\n font=(\"times new roman\", 50, \"bold\"), bg=\"white\", fg=\"green\")\r\ntitle.pack(side=TOP, fill=X)\r\n\r\n# making a button which trigger the function so sound can be playeed\r\nplay_button = Button(root, text=\"Play Song\", font=(\"Helvetica\", 32),\r\n relief=GROOVE, command=play)\r\nplay_button.pack(pady=20)\r\n\r\ninfo = Label(root, text=\"Click on the button above to play song \",\r\n font=(\"times new roman\", 10, \"bold\")).pack(pady=20)\r\nroot.mainloop()","sub_path":"draft.py","file_name":"draft.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"483307728","text":"'''\nProblem 115\n\nA row measuring n units in length has red blocks with a minimum length of m units placed on it, such that any two red blocks (which are allowed to be different lengths) are separated by at least one black square.\n\nLet the fill-count function, F(m, n), represent the number of ways that a row can be filled.\n\nFor example, F(3, 29) = 673135 and F(3, 30) = 1089155.\n\nThat is, for m = 3, it can be seen that n = 30 is the smallest value for which the fill-count function first exceeds one million.\n\nIn the same way, for m = 10, it can be verified that F(10, 56) = 880711 and F(10, 57) = 1148904, so n = 57 is the least value for which the fill-count function first exceeds one million.\n\nFor m = 50, find the least value of n for which the fill-count function first exceeds one million.\n'''\n\nimport time\n\ndef counting_block_combinations_two():\n\tcache = {}\n\tn, m = 50, 50\n\tdef compute(n, m):\n\t\tif n in cache:\n\t\t\treturn cache[n]\n\t\telif n < m:\n\t\t\treturn 0\n\t\telse:\n\t\t\ttotal = 0\n\t\t\tfor i in range(m, n+1):\n\t\t\t\ttotal += 1 + compute(n - i - 1, m)\n\t\t\ttotal += compute(n-1, m)\n\t\t\tcache[n] = total\n\t\t\treturn total\n\twhile True:\n\t\tif compute(n, m) >= 1000000:\n\t\t\treturn n\n\t\tn = n + 1\n\nif __name__ == '__main__':\n\n\tstart = time.time()\n\tprint(counting_block_combinations_two())\n\tend = time.time()\n\n\tprint(\"Execution time: %fs\" %(end - start))\n","sub_path":"solutions/counting_block_combinations_two.py","file_name":"counting_block_combinations_two.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"539768389","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport os, re, subprocess, shutil\n\ndef getPhysical():\n\tfdisk = {}\n\tcommand = ('beesu LC_MESSAGES=C fdisk -l |grep /dev/sd')\n\tret = os.popen( command ).read()\n\tfor str in re.findall('Disk.* .*B', ret):\n\t\tfdisk[str.split()[1].strip(':')] = str.split()[2:4]\n\treturn fdisk\n\ndef 
getLogical():\n\tarr = {}\n\tarrmnt = getmntArr()\n\tcommand = ('beesu LC_MESSAGES=C /sbin/blkid -s TYPE -s LABEL |grep \"/dev/sd\"')\n\tfor key_val in os.popen( command ).read().split('\\n'):\n\t\tif len(key_val.split()) > 1:\n\t\t\tif len(key_val.split()) == 3:\n\t\t\t\tarr[key_val.split()[0].strip(':')] = [key_val.split()[1].replace('LABEL=', '').strip('\"'), ]\n\t\t\t\tarr[key_val.split()[0].strip(':')].append (key_val.split()[2].replace('TYPE=', '').strip('\"'))\n\t\t\tif len(key_val.split()) == 2: \n\t\t\t\tarr[key_val.split()[0].strip(':')] = ['nolabel', ]\n\t\t\t\tarr[key_val.split()[0].strip(':')].append (key_val.split()[1].replace('TYPE=', '').strip('\"'))\t\n\t\t\tif key_val.split()[0].strip(':') in arrmnt:\n\t\t\t\tfor a in arrmnt[key_val.split()[0].strip(':')] :\n\t\t\t\t\tarr[key_val.split()[0].strip(':')].append(a)\n\tfor key, val in list(arr.items()):\n\t\tif len(val) == 7 :\n\t\t\tdirls = os.listdir(val[6])\n\t\t\tif len(dirls) > 0:\n\t\t\t\tarr[key] = arr.get(key), dirls\n\t\t\telse:\n\t\t\t\tarr[key] = arr.get(key), ['disk empty',]\n\t\telse:\n\t\t\tadd = [ '---', '---','---', '---', '---' ]\n\t\t\tarr[key] = val + add \n\t\t\tif val[1] != 'swap':\n\t\t\t\tarr[key] = arr.get(key), ['is not mounted',]\n\t\t\telse:\n\t\t\t\tarr[key] = arr.get(key), ['swap partition',]\n\treturn arr\n\n\ndef getmntArr ():\n\tarrmnt = {}\n\tcommand2 = ( 'findmnt -lnC -o SOURCE,SIZE,USED,AVAIL,USE%,TARGET |grep sd' )\n\tfor line in os.popen( command2 ).read().split('\\n'):\n\t\tif len(line.split()) > 5:\n\t\t\tarrmnt[line.split()[0] ] = ( line.split()[1], line.split()[2], line.split()[3], line.split()[4], line.split()[5] ) \n\treturn arrmnt\n\nif __name__ == '__main__':\n\n\tphysical = getPhysical()\n\tlogical = getLogical()\n\tmnt = getmntArr()\n\n\t#print logical\n\t#print '-----------------------------------***'\n\t#print physical \n\t#print '-----------------------------------***'\n\t#print mnt\n\t\n\tfor key, val in list(physical.items()):\n\t\tprint(key + ': ' + val[0] + ' ' +val[1])\n\t\tfor key1, val1 in list(logical.items()):\n\t\t\tif key1[:-1] == key:\n\t\t\t\tprint(key1 + ': LABEL=' + val1[0][0], 'FS=' + val1[0][1], 'SIZE='+ val1[0][2], 'USED=' + val1[0][3], 'FREE=' + val1[0][4], 'USED%=' + val1[0][5], 'MOUNT=' + val1[0][6])\n\t\t\t\tprint(' ', val1[1])\n\t\tprint('')\n\t\t\n\n\n","sub_path":"make_MagOS/files/patches/rootfs/MagOS/usr/share/magos/modmnger3/cgi-bin/disks.py","file_name":"disks.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"41354125","text":"import urllib\nimport requests\nfrom crossref.restful import Works, Etiquette\nmy_etiquette = Etiquette('Wikipedia quality bachelor thesis', '1.0', 'null', 'qamilnowak@gmail.com')\nstr(my_etiquette)\nworks = Works()\nworks = Works(etiquette=my_etiquette)\ndef fetch_issns():\n with open('doien_ga.tsv') as f:\n lines = f.readlines()[1:] # skip line 1 (table headers)\n\n articles = []\n for line in lines:\n articles.append(\n {\n 'issn': line.split('\\t')[0].strip(),\n })\n\n return articles\n\ndef retrieve_data(doi_encoded, article):\n return {\n 'issn': article['issn'],\n 'enc': doi_encoded['ISSN'][0] if 'ISSN' in doi_encoded else 'null',\n }\n\n\ndef fetch_results():\n results = []\n\n for article in fetch_issns():\n if works.doi(article['issn']) is not None:\n doi_encoded = works.doi(article['issn'])\n else:doi_encoded='null'\n print('[INFO] Parsed DOI: ' + str(article['issn']))\n results.append(\n retrieve_data(doi_encoded, 
article))\n\n return results\n\n\ndef write_to_file(results):\n with open('doien_ga_ext.tsv', 'w') as f:\n for result in results:\n for index, item in enumerate(result):\n if index < (len(result) - 1):\n f.write(str(result[item]) + '\\t')\n else:\n f.write(str(result[item]) + '\\n')\n\n print('INFO] Wrote to file')\n\n\nresults = fetch_results()\nwrite_to_file(results)\n","sub_path":"doi1.py","file_name":"doi1.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"334320778","text":"'''\nCreated on Sep 2, 2016\n\n@author: uid38420\n'''\nimport os\nimport cv2\nimport numpy as np\nfrom glob import glob\n\ndatabase = \"D:/Codes/Python/PreProcessing/IlluminationNormalisation/results/YaleB/\"\n\ndef detectBlur(image):\n threshold = 100\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n \n # compute the Laplacian of the image and return the focus measure (variance of Laplacian)\n var = cv2.Laplacian(gray, cv2.CV_64F).var()\n text = \"Not Blurry\"\n\n if var < threshold:\n text = \"Blurry\"\n cv2.putText(image, \"{}: {:.2f}\".format(text, var), (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)\n return image\n\n# generating kernels\n#Sharpening\nkernel_sharpen_1 = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n#Excessive Sharpening\nkernel_sharpen_2 = np.array([[1, 1, 1], [1, -7, 1], [1, 1, 1]])\n#Edge Enhancement\nkernel_sharpen_3 = np.array([[-1, -1, -1, -1, -1],\n [-1, 2, 2, 2, -1],\n [-1, 2, 8, 2, -1],\n [-1, 2, 2, 2, -1],\n [-1, -1, -1, -1, -1]]) / 8.0\n\ndir = os.listdir(database) \nfor item in dir :\n if \".git\" not in item:\n fileName = database + item + \"/*.jpg\"\n \n for fn in glob(fileName):\n img1 = cv2.imread(fn)\n img2 = cv2.imread(fn)\n \n #detect blur\n blurriness = detectBlur(img2)\n \n name = fn.rpartition('\\\\')\n print('processing...' 
+ name[2])\n \n # applying different kernels to the input image\n output_1 = cv2.filter2D(img1, -1, kernel_sharpen_1)\n output_2 = cv2.filter2D(img1, -1, kernel_sharpen_2)\n output_3 = cv2.filter2D(img1, -1, kernel_sharpen_3)\n output_4 = cv2.filter2D(output_1, -1, kernel_sharpen_3) #sharpen+edge\n \n #see all results simultaneously\n# result = np.hstack((blurriness, output_1,output_2, output_3, output_4))\n# cv2.imshow(\"res\", result)\n# cv2.waitKey(0)\n# fileName = name[2].rpartition('.')\n# resultPath = os.getcwd() + \"\\\\\" + \"results\" + \"\\\\\" + item + \"\\\\\"\n# if not os.path.exists(resultPath):\n# os.makedirs(resultPath)\n# resultName = resultPath + name[2]\n# cv2.imwrite(resultName, result)\n \n #save results\n #result - Sharpening\n resultPath = os.getcwd() + \"\\\\\" + \"results\" + \"\\\\\" + item + \"\\\\Sharpening\\\\\"\n if not os.path.exists(resultPath):\n os.makedirs(resultPath)\n resultName = resultPath + name[2]\n cv2.imwrite(resultName, output_1)\n \n #result - Edge Enhancement\n resultPath = os.getcwd() + \"\\\\\" + \"results\" + \"\\\\\" + item + \"\\\\Edge Enhancement\\\\\"\n if not os.path.exists(resultPath):\n os.makedirs(resultPath)\n resultName = resultPath + name[2]\n cv2.imwrite(resultName, output_3)\n \n #result - Sharpen+Edge\n resultPath = os.getcwd() + \"\\\\\" + \"results\" + \"\\\\\" + item + \"\\\\Sharpen+Edge\\\\\"\n if not os.path.exists(resultPath):\n os.makedirs(resultPath)\n resultName = resultPath + name[2]\n cv2.imwrite(resultName, output_4)","sub_path":"PreProcessing/Blurriness/sharpening.py","file_name":"sharpening.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"383218531","text":"# Support Vector machine\r\n# Inputs a 2d Array and 1D array as Inputs\r\n# Vectorization or Feature Extraction, COnvert Text and Image data into\r\n# Array data for machie learning\r\n# samples 1D\r\n# Features 2D\r\n\r\n\r\nfrom sklearn import datasets\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.datasets import get_data_home\r\nimport matplotlib.pyplot as plt\r\n\r\niris = load_iris()\r\niris.keys()\r\n\r\nn_samples, n_features = iris.data.shape\r\n\r\nprint(n_samples)\r\nprint(n_features)\r\nprint(iris.data[0])\r\n\r\nprint(iris.data.shape)\r\nprint(iris.target.shape)\r\n\r\nprint(iris.target)\r\nprint(iris.target_names)\r\n\r\n# These are the Feature Columns that will be used to plot the graph\r\n# Change between 0-3\r\nx_index = 2\r\ny_index = 3\r\n\r\n# this formatter will label the colorbar with the correct target names\r\nformatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)])\r\n\r\nplt.scatter(iris.data[:, x_index], iris.data[:, y_index],250, c=iris.target)\r\nplt.colorbar(ticks=[0, 1, 2], format=formatter)\r\nplt.xlabel(iris.feature_names[x_index])\r\nplt.ylabel(iris.feature_names[y_index])\r\nplt.show()\r\n","sub_path":"mc1.py","file_name":"mc1.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"101326557","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 9 14:31:49 2020\n\n@author: arnoud\n\n% Self-defined Loss Functions\n\n\"\"\"\n\nimport tensorflow.keras.backend as K\nimport tensorflow as tf\nimport numpy as np\nfrom itertools import product\n\n\ndef focal_loss_fixed(y_true, y_pred, alpha_value, gamma_value):\n \"\"\"Focal loss for multi-classification\n FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t)\n Notice: y_pred 
is probability after softmax\n gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper\n d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x)\n Focal Loss for Dense Object Detection\n https://arxiv.org/abs/1708.02002\n\n Arguments:\n y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls]\n y_pred {tensor} -- model's output, shape of [batch_size, num_cls]\n\n Keyword Arguments:\n gamma {float} -- (default: {2.0})\n alpha {float} -- (default: {4.0})\n\n Returns:\n [tensor] -- loss.\n \"\"\"\n \n alpha, gamma = alpha_value, gamma_value\n\n epsilon = 1.e-9\n y_true = tf.convert_to_tensor(y_true, tf.float32)\n y_pred = tf.convert_to_tensor(y_pred, tf.float32)\n\n model_out = tf.add(y_pred, epsilon)\n ce = tf.multiply(y_true, -tf.math.log(model_out))\n weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma))\n fl = tf.multiply(alpha, tf.multiply(weight, ce))\n reduced_fl = tf.reduce_max(fl, axis=1)\n return tf.reduce_mean(reduced_fl)\n\n\ndef wcce(y_true, y_pred, weights):\n Kweights = K.constant(weights)\n if not K.is_tensor(y_pred): y_pred = K.constant(y_pred)\n y_true = K.cast(y_true, y_pred.dtype)\n return K.categorical_crossentropy(y_true, y_pred) * K.sum(y_true * Kweights, axis=-1)\n\n\ndef w_categorical_crossentropy(y_true, y_pred, weights):\n nb_cl = len(weights)\n final_mask = K.zeros_like(y_pred[:, 0])\n y_pred_max = K.max(y_pred, axis=1)\n y_pred_max = K.expand_dims(y_pred_max, 1)\n y_pred_max_mat = K.equal(y_pred, y_pred_max)\n for c_p, c_t in product(range(nb_cl), range(nb_cl)):\n final_mask += (K.cast(weights[c_t, c_p],K.floatx()) * K.cast(y_pred_max_mat[:, c_p] ,K.floatx())* K.cast(y_true[:, c_t],K.floatx()))\n return K.categorical_crossentropy(y_pred, y_true) * final_mask\n\n\ndef my_categorical_crossentropy(y_true, y_pred): \n loss = K.categorical_crossentropy(y_true,y_pred) \n return loss\n \n \ndef score_loss(y_true, y_pred, n_class):\n loss = 0\n # number of classes\n for i in np.eye(n_class):\n y_true_ = K.constant([list(i)]) * y_true\n y_pred_ = K.constant([list(i)]) * y_pred\n loss += 0.5 * K.sum(y_true_ * y_pred_) / K.sum(y_true_ + y_pred_ + K.epsilon())\n return - K.log(loss + K.epsilon())\n \n \n\n\n\n\n\n\n","sub_path":"Project_FrogLossFunctionCNN_Brazil/MyClass_python/my_loss_function.py","file_name":"my_loss_function.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"522696549","text":"import chainer\nimport chainer.links as L\nimport chainer.functions as F\nfrom gen_models.resblocks import Block\nfrom source.miscs.random_samples import sample_continuous\n\nclass ResNetGenerator(chainer.Chain):\n def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=F.relu, distribution=\"normal\"):\n super(ResNetGenerator, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n self.bottom_width = bottom_width\n self.activation = activation\n self.distribution = distribution\n self.dim_z = dim_z\n with self.init_scope():\n self.l1 = L.Linear(dim_z, (bottom_width ** 2) * ch * 16, initialW=initializer)\n self.block2 = Block(ch * 16, ch * 16, activation=activation, upsample=True) #(4x4) => (8x8)\n self.block3 = Block(ch * 16, ch * 8, activation=activation, upsample=True) #(8x8) => (16x16)\n self.block4 = Block(ch * 8, ch * 4, activation=activation, upsample=True) #(16x16) => (32x32)\n self.block5 = Block(ch * 4, ch * 2, activation=activation, upsample=True) #(32x32) => (64x64)\n self.block6 = Block(ch * 2, ch, activation=activation, 
upsample=True) #(64x64) => (128x128)\n self.block7 = Block(ch, ch//2, activation=activation, upsample=True) #(128x128) => (256x256)\n self.b7 = L.BatchNormalization(ch//2)\n self.l7 = L.Convolution2D(ch//2, 3, ksize=3, stride=1, pad=1, initialW=initializer)\n\n def __call__(self, batchsize=64, z=None, **kwargs):\n if z is None:\n z = sample_continuous(self.dim_z, batchsize, distribution=self.distribution, xp=self.xp)\n h = z\n h = self.l1(h)\n h = F.reshape(h, (h.shape[0], -1, self.bottom_width, self.bottom_width)) # (Batchsize, auto, 4, 4)\n h = self.block2(h)\n h = self.block3(h)\n h = self.block4(h)\n h = self.block5(h)\n h = self.block6(h)\n h = self.block7(h)\n h = self.b7(h)\n h = self.activation(h)\n h = F.tanh(self.l7(h))\n return h\n","sub_path":"src/models/gen_models/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"274307686","text":"\"\"\"Example module docstrings text\"\"\"\n\n\ndef print_grid(grid_dimension=1, grid_size=1):\n \"\"\"Return a specified grid dimension with specified grid size.\"\"\"\n\n if grid_dimension < 1:\n grid_dimension = 1\n else:\n grid_dimension = int(round(grid_dimension))\n\n if grid_size < 1:\n grid_size = 1\n else:\n grid_size = int(round(grid_size))\n\n # initializing variables\n plus = '+'\n minus = '-'\n line = '|'\n space = ' '\n major = ''\n minor = ''\n\n # creating horizonal major with '+' & '-', and horizonal minor with '|'\n for i in range(0, grid_dimension):\n minusnspace = minus + space\n major = major + plus + space + (minusnspace * grid_size)\n minor = minor + line + space + (2 * grid_size * space)\n\n major = major + plus\n minor = minor + line\n\n # looping to print major and minor\n for i in range(0, grid_dimension):\n print(major)\n for j in range(0, grid_size):\n print(minor)\n\n print(major)\n return\n","sub_path":"students/khtruong/lesson_02/gridprinter.py","file_name":"gridprinter.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"34945364","text":"import array\nimport copy\n\n\nSIDES = 4 # tetravex\n\nEMPTY = -1 # indicating free cell in solution\nGRAY = 0 # indicating border\n\nNORTH = 0\nSOUTH = 2\nWEST = 3\nEAST = 1\n\n\ndef initialize_grid(rows, cols):\n return [[array.array('h', [-1, -1, -1, -1]) for x in range(cols)] for y in range(rows)]\n\nPYTHON_FORMAT = 'python_format' \nJAVA_FORMAT = 'java_format' \nNATLO_FORMAT = 'natlo_format' \nBENOIST_FORMAT = 'benoist_format'\nVANSTONE_FORMAT = 'vanstone_format'\n\ndef initialize_pieces(n_pieces=4, puzzles_format=PYTHON_FORMAT, filename=None):\n grid_size = None\n with open(filename, 'r') as f:\n pieces = []\n for i, line in enumerate(f):\n if puzzles_format == BENOIST_FORMAT:\n if i == 0:\n grid_size = int(line.strip())\n elif i == 1:\n number_of_colors = int(line.strip())\n elif i == 2:\n number_of_hints = int(line.strip())\n elif i <= (2 + number_of_hints):\n # skip hints\n continue\n else:\n piece = line.strip().split(\" \")\n pieces.append(\n (piece[NORTH],\n piece[1],\n piece[2],\n piece[3]\n )\n )\n\n elif puzzles_format == VANSTONE_FORMAT:\n if i == 0:\n grid_size = int(line.strip().split()[0])\n elif i ==1 or i == 2:\n # number of colors\n continue\n elif grid_size is not None and (i - 2 > grid_size * grid_size):\n print(grid_size, i, 'aaa')\n break\n else:\n piece = [int(x) for x in line.strip().split(\" \")]\n pieces.append(\n 
(piece[NORTH],\n piece[1],\n piece[2],\n piece[3]),\n )\n\n elif puzzles_format == JAVA_FORMAT:\n puzzle_subpiece = [int(x) for x in line.strip().split(' ') if x]\n for i in range(len(puzzle_subpiece) // SIDES):\n pieces.append(\n (puzzle_subpiece[i * SIDES + NORTH],\n puzzle_subpiece[i * SIDES + 1],\n puzzle_subpiece[i * SIDES + 2],\n puzzle_subpiece[i * SIDES + 3])\n )\n elif puzzles_format == NATLO_FORMAT:\n piece = [int(x) for x in line.strip().split(\", \")]\n pieces.append((piece[WEST], piece[NORTH], piece[EAST], piece[SOUTH]))\n elif puzzles_format == PYTHON_FORMAT:\n piece = [int(x) for x in line.strip().split(\" \")]\n pieces.append((piece[NORTH], piece[3], piece[1], piece[2]))\n\n return pieces, grid_size\n\ndef pieces_to_editor_format(pieces):\n import math\n pieces_break = int(math.sqrt(len(pieces)))\n output_str = ''\n for i, piece in enumerate(pieces):\n if i % pieces_break == 0:\n output_str += '\\n'\n output_str += (f'{piece[NORTH]} {piece[EAST]} {piece[SOUTH]} {piece[WEST]} ')\n\n print(output_str)\n return output_str\n\ndef pieces_to_orientations(pieces):\n '''\n given an array of pieces, return an array with all the pieces possible orientations\n '''\n ret_pieces = [[EMPTY for x in range(SIDES)] for y in range(len(pieces) * 4)]\n for i, piece in enumerate(pieces):\n ret_pieces[0 + i * 4][0] = piece[0]\n ret_pieces[0 + i * 4][1] = piece[1]\n ret_pieces[0 + i * 4][2] = piece[2]\n ret_pieces[0 + i * 4][3] = piece[3]\n\n ret_pieces[1 + i * 4][0] = piece[3]\n ret_pieces[1 + i * 4][1] = piece[0]\n ret_pieces[1 + i * 4][2] = piece[1]\n ret_pieces[1 + i * 4][3] = piece[2]\n\n ret_pieces[2 + i * 4][0] = piece[2]\n ret_pieces[2 + i * 4][1] = piece[3]\n ret_pieces[2 + i * 4][2] = piece[0]\n ret_pieces[2 + i * 4][3] = piece[1]\n\n ret_pieces[3 + i * 4][0] = piece[1]\n ret_pieces[3 + i * 4][1] = piece[2]\n ret_pieces[3 + i * 4][2] = piece[3]\n ret_pieces[3 + i * 4][3] = piece[0]\n return ret_pieces\n\ndef rotate_piece(piece, orientation):\n '''\n orientation is integer; rotation clockwise\n '''\n\n if orientation == 0:\n ret_piece = (piece[0], piece[1], piece[2], piece[3])\n elif orientation == 1:\n ret_piece = (piece[3], piece[0], piece[1], piece[2])\n elif orientation == 2:\n ret_piece = (piece[2], piece[3], piece[0], piece[1])\n elif orientation == 3:\n ret_piece = (piece[1], piece[2], piece[3], piece[0])\n return ret_piece\n\ndef place_piece_on_grid(grid, piece, position, is_circular=False):\n '''\n place position on some position.\n position is determined by strategy.\n '''\n\n success = is_move_legal(grid, piece, position)\n if not success:\n return False, None, None\n grid = copy.deepcopy(grid)\n\n grid[position[0]][position[1]][0] = piece[0]\n grid[position[0]][position[1]][1] = piece[1]\n grid[position[0]][position[1]][2] = piece[2]\n grid[position[0]][position[1]][3] = piece[3]\n next_position = get_next_position(grid, position, is_circular=is_circular)\n return success, grid, next_position\n\ndef get_valid_next_moves(grid, pieces, position):\n\n '''\n return valid next moves as a tuple indicating (piece index, orientation)\n '''\n possible_moves = []\n for i, piece in enumerate(pieces):\n for orientation in range(SIDES):\n _piece = rotate_piece(piece, orientation)\n if is_move_legal(grid, _piece, position):\n possible_moves.append((i, orientation))\n return possible_moves\n\n\ndef is_move_legal(grid, piece, position):\n\n rows, cols = len(grid), len(grid[0])\n row, col = position\n\n if (\n # tiles at border with non-matching borders\n (position[0] == 0 and piece[NORTH] != 
GRAY) or\n (position[0] == rows - 1 and piece[SOUTH] != GRAY) or\n (position[1] == 0 and piece[WEST] != GRAY) or\n (position[1] == cols - 1 and piece[EAST] != GRAY) or \n\n # border tiles in center\n (position[0] != 0 and piece[NORTH] == GRAY) or\n (position[0] != rows -1 and piece[SOUTH] == GRAY) or\n (position[1] != 0 and piece[WEST] == GRAY) or\n (position[1] != cols -1 and piece[EAST] == GRAY)\n ):\n return False\n elif (\n (row > 0 and piece[NORTH] != grid[row - 1][col][SOUTH] and grid[row - 1][col][SOUTH] != EMPTY) or \n (row < rows -1 and piece[SOUTH] != grid[row + 1][col][NORTH] and grid[row + 1][col][NORTH] != EMPTY) or\n (col > 0 and piece[WEST] != grid[row][col - 1][EAST] and grid[row][col - 1][EAST] != EMPTY) or\n (col < cols - 1 and piece[EAST] != grid[row][col + 1][WEST] and grid[row][col + 1][WEST] != EMPTY)\n ):\n return False\n return True\n \n\n\ndef get_next_position(grid, prev_position, is_circular=True):\n rows, cols = len(grid), len(grid[0])\n\n if is_circular:\n '''\n first fill the border\n '''\n if prev_position[0] == 0 and prev_position[1] == cols - 1: # right top corner\n next_position = (prev_position[0] + 1, prev_position[1])\n elif prev_position[0] == rows - 1 and prev_position[1] == cols - 1: # right bottom corner\n next_position = (prev_position[0], prev_position[1] - 1)\n elif prev_position[0] == rows - 1 and prev_position[1] == 0: # left bottom corner\n next_position = (prev_position[0] - 1, prev_position[1])\n elif prev_position[0] == 0 and prev_position[1] == 0: # left top corner\n next_position = (prev_position[0], prev_position[1] + 1)\n elif prev_position[0] == 1 and prev_position[1] == 0: # frame has been filled\n next_position = (prev_position[0], prev_position[1] + 1)\n elif prev_position[0] == 0: # top row\n next_position = (prev_position[0], prev_position[1] + 1)\n elif prev_position[0] == rows - 1: # bottom row\n next_position = (prev_position[0], prev_position[1] - 1)\n elif prev_position[1] == 0: # left side\n next_position = (prev_position[0] - 1, prev_position[1])\n elif prev_position[1] == cols - 1: # right side\n next_position = (prev_position[0] + 1, prev_position[1])\n else:\n # row by row avoiding frame\n if prev_position[0] == rows -1 and prev_position[1] == cols -1:\n next_position = None\n elif prev_position[1] == cols - 2:\n next_position = (prev_position[0] + 1, 1)\n else: # not close to any border, just do the normal\n next_position = (\n ((prev_position[0] * cols) + prev_position[1] + 1) // cols, \n ((prev_position[0] * cols) + prev_position[1] + 1) % cols \n )\n else:\n # row by row\n next_position = (\n ((prev_position[0] * cols) + prev_position[1] + 1) // cols, \n ((prev_position[0] * cols) + prev_position[1] + 1) % cols \n )\n return next_position\n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":9259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"182456068","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils.timezone import utc\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dtrprofile', '0012_auto_20141225_0936'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userprofile',\n name='lookingfor',\n field=models.SmallIntegerField(default=0, verbose_name='lookingfor', choices=[(0, ''), (1, 'friends only'), (2, 'serious relationship'), (3, 'casual dating'), (4, 'passion'), (5, 'casual sex'), (6, 'not 
sure yet'), (7, 'marriage')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='userflag',\n name='created',\n field=models.DateTimeField(default=datetime.datetime(2015, 1, 13, 6, 40, 53, 119287, tzinfo=utc)),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='userpic',\n name='created',\n field=models.DateTimeField(default=datetime.datetime(2015, 1, 13, 6, 40, 53, 121224, tzinfo=utc)),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='last_active',\n field=models.DateTimeField(db_index=True, default=datetime.datetime(2015, 1, 13, 6, 40, 53, 125242, tzinfo=utc)),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='last_modified',\n field=models.DateTimeField(default=datetime.datetime(2015, 1, 13, 6, 40, 53, 125173, tzinfo=utc)),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='lat',\n field=models.FloatField(db_index=True, default=None, null=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='lng',\n field=models.FloatField(db_index=True, default=None, null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"dtrprofile/migrations/0013_auto_20150113_0640.py","file_name":"0013_auto_20150113_0640.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"232241855","text":"#!/usr/bin/env python\n\n# What is the largest prime factor of the number 600851475143?\n\ntarget = 600851475143\n\n\ndef isPrime(number):\n # returns 1 when a divisor is found (number is composite), 0 when number is prime\n for j in range(2, int(number) // 2):\n if number % j == 0:\n return 1\n return 0\n\n\ndef main(target):\n for i in range(2, target // 2):\n k = target / i\n if k % 1 == 0:\n if isPrime(k) == 0:\n print(\"The largest prime factor is\", int(k))\n break\n\n\nif __name__ == \"__main__\":\n main(target)\n","sub_path":"001-010/003/003.py","file_name":"003.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"561930890","text":"import logging\nimport os\nimport shutil\n\nfrom assemblyline.common.exceptions import ChainAll\nfrom assemblyline.common.uid import get_random_id\nfrom assemblyline.filestore.transport.base import Transport, TransportException, normalize_srl_path\n\n\n@ChainAll(TransportException)\nclass TransportLocal(Transport):\n \"\"\"\n Local file system Transport class.\n \"\"\"\n\n def __init__(self, base=None, normalize=None):\n self.log = logging.getLogger('assemblyline.transport.local')\n self.base = base\n self.host = \"localhost\"\n\n def local_normalize(path):\n # If they've provided an absolute path. Leave it as is.\n if path.startswith('/'):\n s = path\n # Relative paths\n elif '/' in path or len(path) != 64:\n s = _join(self.base, path)\n else:\n s = _join(self.base, normalize_srl_path(path))\n self.log.debug('local normalized: %s -> %s', path, s)\n return s\n\n if not normalize:\n normalize = local_normalize\n\n super(TransportLocal, self).__init__(normalize=normalize)\n\n def delete(self, path):\n path = self.normalize(path)\n os.unlink(path)\n\n def exists(self, path):\n path = self.normalize(path)\n return os.path.exists(path)\n\n def getmtime(self, path):\n path = self.normalize(path)\n\n try:\n return os.path.getmtime(path)\n except OSError:\n return 0\n\n def makedirs(self, path):\n path = self.normalize(path)\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == 17: # EEXIST: the directory already exists\n pass\n else:\n raise e\n\n # File based functions\n def download(self, src_path, dst_path):\n if src_path == dst_path:\n return\n\n src_path = self.normalize(src_path)\n dir_path = os.path.dirname(dst_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n shutil.copy(src_path, dst_path)\n\n def upload(self, src_path, dst_path):\n dst_path = self.normalize(dst_path)\n if src_path == dst_path:\n return\n\n dirname = os.path.dirname(dst_path)\n filename = os.path.basename(dst_path)\n tempname = get_random_id()\n temppath = _join(dirname, tempname)\n finalpath = _join(dirname, filename)\n assert (finalpath == dst_path)\n self.makedirs(dirname)\n shutil.copy(src_path, temppath)\n shutil.move(temppath, finalpath)\n assert (self.exists(dst_path))\n\n # Buffer based functions\n def get(self, path):\n path = self.normalize(path)\n fh = None\n try:\n fh = open(path, \"rb\")\n return fh.read()\n finally:\n if fh:\n fh.close()\n\n def put(self, path, content):\n path = self.normalize(path)\n\n dirname = os.path.dirname(path)\n filename = os.path.basename(path)\n\n tempname = get_random_id()\n temppath = _join(dirname, tempname)\n\n finalpath = _join(dirname, filename)\n assert(finalpath == path)\n\n self.makedirs(dirname)\n fh = None\n try:\n fh = open(temppath, \"wb\")\n # NOTE: returning here would skip the move below, stranding the data\n # in the temporary file, so the write result is deliberately dropped.\n fh.write(content)\n finally:\n if fh:\n fh.close()\n\n try:\n shutil.move(temppath, finalpath)\n except shutil.Error:\n pass\n assert(self.exists(path))\n\n def __str__(self):\n return 'file://{}'.format(self.base)\n\n###############################\n# Helper functions.\n###############################\n\n\ndef _join(base, path):\n path = path.replace(\"\\\\\", \"/\").replace(\"//\", \"/\")\n if base is None:\n return path\n return os.path.join(base, path.lstrip(\"/\")).replace(\"\\\\\", \"/\")\n","sub_path":"assemblyline/filestore/transport/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"55596225","text":"from flask import Flask\n\n# Initialize the app\napp = Flask(__name__, instance_relative_config=True)\n\n# Load the views\nfrom app import views\n\napp.config['SECRET_KEY'] = \"this-is-secret\"\n\n# Load the config file\napp.config.from_object('config')\n\nif __name__ == '__main__':\n app.run()\n ","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"612711590","text":"import csv\nfilename = input(\"Enter the file name\")\n\na = input(\"Enter the text to append\")\ncsvFile = open(filename+\".csv\", \"r\")\nreader = csv.reader(csvFile)\ntmp = []\nfor item in reader:\n    c = 
item[0]+a\n item.append(c)\n tmp.append(item)\n\ncsvFile = open(\"鼻咽.csv\", \"w\")\nwriter = csv.writer(csvFile)\nfor i in tmp:\n writer.writerow(i)\ncsvFile.close()","sub_path":"test/tesr.py","file_name":"tesr.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"332398054","text":"import os\nimport unittest\n\nfrom sqlalchemy import MetaData, Table, Column, Integer, DateTime, select\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.schema import CreateIndex\n\nfrom pedsnetdcc.age_transform import AgeTransform\nfrom pedsnetdcc.utils import make_conn_str\nfrom pedsnetdcc.tests.transform_test_utils import clean\n\n\nclass AgeTest(unittest.TestCase):\n\n def setUp(self):\n self.metadata = MetaData()\n\n foo_col = Column('foo_start_time', DateTime)\n bar_col = Column('bar_start_time', DateTime)\n person_col = Column('person_id', Integer)\n\n self.table1 = Table('table1', self.metadata,\n foo_col,\n bar_col,\n person_col)\n\n baz_col = Column('baz_start_time', DateTime)\n baz_person_col = Column('person_id', Integer)\n\n self.table2 = Table('table2', self.metadata,\n baz_col, baz_person_col)\n\n # Create and add the `person` table to the sqlalchemy metadata\n self.person = Table('person', self.metadata,\n Column('person_id', Integer),\n Column('time_of_birth', DateTime))\n\n AgeTransform.columns_by_table = {\n 'table1': ('foo_start_time', 'bar_start_time'),\n }\n\n def test_modify_select(self):\n\n select_obj = select([self.table1])\n join_obj = self.table1\n\n select_obj, join_obj = AgeTransform.modify_select(\n self.metadata,\n 'table1',\n select_obj,\n join_obj)\n\n select_obj = select_obj.select_from(join_obj)\n\n new_sql = str(select_obj.compile(dialect=postgresql.dialect()))\n\n expected = clean(\"\"\"\n SELECT table1.foo_start_time,\n table1.bar_start_time,\n table1.person_id,\n months_in_interval(person.time_of_birth, table1.foo_start_time)\n AS foo_start_age_in_months,\n months_in_interval(person.time_of_birth, table1.bar_start_time)\n AS bar_start_age_in_months\n {NL}FROM table1\n JOIN person ON person.person_id = table1.person_id\n \"\"\")\n\n self.maxDiff = None\n self.assertEqual(new_sql, expected)\n\n def test_modify_select_negative(self):\n\n select_obj = select([self.table2])\n join_obj = self.table2\n\n select_obj, join_obj = AgeTransform.modify_select(\n self.metadata,\n 'table2',\n select_obj,\n join_obj)\n\n select_obj = select_obj.select_from(join_obj)\n\n new_sql = str(select_obj.compile(dialect=postgresql.dialect()))\n\n expected = clean(\"\"\"\n SELECT table2.baz_start_time,\n table2.person_id\n {NL}FROM table2\n \"\"\")\n\n self.maxDiff = None\n self.assertEqual(new_sql, expected)\n\n def test_modify_metadata(self):\n\n metadata = AgeTransform.modify_metadata(self.metadata)\n\n indexes = metadata.tables['table1'].indexes\n self.assertEqual(len(indexes), 2, 'Indexes created')\n\n for index in indexes:\n index_sql = str(CreateIndex(index).compile(\n dialect=postgresql.dialect()))\n if index.name == 'tab_fsaim_107eee9e009461416_ix':\n expected = clean(\"\"\"\n CREATE INDEX tab_fsaim_107eee9e009461416_ix\n ON table1 (foo_start_age_in_months)\n \"\"\")\n self.assertEqual(index_sql, expected)\n elif index.name == 'tab_bsaim_ca07fdbcdf9bfef7a_ix':\n expected = clean(\"\"\"\n CREATE INDEX tab_bsaim_ca07fdbcdf9bfef7a_ix\n ON table1 (bar_start_age_in_months)\n \"\"\")\n self.assertEqual(index_sql, expected)\n else:\n self.fail(\n 'Unexpected index encountered: 
{}'.format(index.name))\n\n    def test_pre_transform(self):\n        dburi_var = 'PEDSNETDCC_TEST_DBURI'\n        search_path_var = 'PEDSNETDCC_TEST_SEARCH_PATH'\n        if (dburi_var not in os.environ and\n                search_path_var not in os.environ):\n            self.skipTest(\n                '{} and {} required for testing '\n                'AgeTransform.pre_transform'.format(\n                    dburi_var, search_path_var))\n        conn_str = make_conn_str(uri=os.environ[dburi_var],\n                                 search_path=os.environ[search_path_var])\n        AgeTransform.pre_transform(conn_str)\n        # TODO: verify function creation via introspection\n\n    def test_with_data(self):\n        # TODO: use test data and verify transformation results\n        self.skipTest('Not implemented yet')\n","sub_path":"pedsnetdcc/tests/age_transform_test.py","file_name":"age_transform_test.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"229781150","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtGui import QRegExpValidator\r\nfrom PyQt5.QtCore import QRegExp\r\nfrom datetime import datetime, timedelta\r\nfrom mysql.connector import Error\r\nfrom additional_files import utilities as u \r\nimport uuid\r\nimport ctypes\r\n\r\nclass Ui_MainWindow(QtWidgets.QFileDialog):\r\n\r\n    def __init__(self,db):\r\n        super().__init__()\r\n        self.db=db\r\n        \r\n    #GUI\r\n    def setupUi(self, MainWindow):\r\n        MainWindow.setObjectName(\"MainWindow\")\r\n        #MainWindow.resize(ctypes.windll.user32.GetSystemMetrics(0), ctypes.windll.user32.GetSystemMetrics(1))\r\n        MainWindow.resize(1366,768)\r\n        self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n        self.centralwidget.setObjectName(\"centralwidget\")\r\n        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)\r\n        self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n        self.frame = QtWidgets.QFrame(self.centralwidget)\r\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\r\n        sizePolicy.setHorizontalStretch(0)\r\n        sizePolicy.setVerticalStretch(0)\r\n        sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())\r\n        self.frame.setSizePolicy(sizePolicy)\r\n        self.frame.setAutoFillBackground(False)\r\n        self.frame.setStyleSheet(\"*\\n\"\r\n\"{\\n\"\r\n\"    font-family:Century Gothic;\\n\"\r\n\"    font-size:15px;\\n\"\r\n\"}\\n\"\r\n\"\\n\"\r\n\"QFrame{\\n\"\r\n\"background:rgb(255, 255, 217);\\n\"\r\n\"border:2px solid black;\\n\"\r\n\"}\\n\"\r\n\"QLabel#ImageLabel\\n\"\r\n\"{ \\n\"\r\n\"    border:1px solid black\\n\"\r\n\"}\\n\"\r\n\"QLabel{\\n\"\r\n\"    font-weight:bold;\\n\"\r\n\"    border:none\\n\"\r\n\"}\\n\"\r\n\"QLineEdit\\n\"\r\n\"{\\n\"\r\n\"    background:white;\\n\"\r\n\"    padding:5px;\\n\"\r\n\"    font-size:12px;\\n\"\r\n\"}\\n\"\r\n\"QPushButton{\\n\"\r\n\"background:white;\\n\"\r\n\"padding:2px;\\n\"\r\n\"font-size:10px;\\n\"\r\n\"border-style:inset;\\n\"\r\n\"border:1px solid black;\\n\"\r\n\"}\\n\"\r\n\"QPushButton#Submit{\\n\"\r\n\"    background:rgb(15, 73, 61);\\n\"\r\n\"    font-size:15px;\\n\"\r\n\"    color:white;\"\r\n\"}\\n\"\r\n\"QPushButton:pressed{\\n\"\r\n\"    border-style:outset;\\n\"\r\n\"}\\n\"\r\n\"QComboBox{\\n\"\r\n\"    background:white;\\n\"\r\n\"    border-style:inset;\\n\"\r\n\"    border:1px solid black;\\n\"\r\n\"}\\n\"\r\n\"QComboBox:focus{\\n\"\r\n\"    border-style:outset;\\n\"\r\n\"}\\n\"\r\n\"QDateEdit{\\n\"\r\n\"    background:white;\\n\"\r\n\"}\\n\"\r\n\"\")\r\n        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n        
self.frame.setObjectName(\"frame\")\r\n self.NameEdit = QtWidgets.QLineEdit(self.frame)\r\n self.NameEdit.setGeometry(QtCore.QRect(280, 70, 371, 41))\r\n self.NameEdit.setObjectName(\"NameEdit\")\r\n self.DOBEdit = QtWidgets.QDateEdit(self.frame)\r\n self.DOBEdit.setGeometry(QtCore.QRect(280, 150, 191, 24))\r\n self.DOBEdit.setObjectName(\"DOBEdit\")\r\n self.minimumDateDob=datetime.today()-timedelta(days=365*20)\r\n self.DOBEdit.setMaximumDateTime(self.minimumDateDob)\r\n self.DOBEdit.setMinimumDateTime(datetime.today()-timedelta(days=365*60))\r\n self.DOJEdit = QtWidgets.QDateEdit(self.frame)\r\n self.DOJEdit.setGeometry(QtCore.QRect(960, 150, 191, 24))\r\n self.DOJEdit.setObjectName(\"DOJEdit\")\r\n self.DOJEdit.setMaximumDateTime(datetime.today())\r\n self.minimumDateDoj=datetime(1932,10,15) \r\n self.DOJEdit.setMinimumDateTime(self.minimumDateDoj)\r\n self.DOB = QtWidgets.QLabel(self.frame)\r\n self.DOB.setGeometry(QtCore.QRect(90, 150, 100, 21))\r\n self.DOB.setObjectName(\"DOB\")\r\n self.ImageButton = QtWidgets.QPushButton(self.frame)\r\n self.ImageButton.setGeometry(QtCore.QRect(1160, 230, 91, 21))\r\n self.ImageButton.setObjectName(\"ImageButton\")\r\n self.SignatureButton = QtWidgets.QPushButton(self.frame)\r\n self.SignatureButton.setGeometry(QtCore.QRect(1160, 410, 91, 21))\r\n self.SignatureButton.setObjectName(\"SignatureButton\")\r\n self.DepartmentEdit = QtWidgets.QComboBox(self.frame)\r\n self.DepartmentEdit.setGeometry(QtCore.QRect(280, 540, 271, 24))\r\n self.DepartmentEdit.setAcceptDrops(False)\r\n self.DepartmentEdit.setEditable(True)\r\n self.DepartmentEdit.setObjectName(\"DepartmentEdit\")\r\n self.DepartmentEdit.addItem(\"\")\r\n self.DepartmentEdit.addItem(\"\")\r\n self.DepartmentEdit.addItem(\"\")\r\n self.DepartmentEdit.addItem(\"\")\r\n self.Department = QtWidgets.QLabel(self.frame)\r\n self.Department.setGeometry(QtCore.QRect(90, 540, 147, 18))\r\n self.Department.setObjectName(\"Department\")\r\n self.Submit = QtWidgets.QPushButton(self.frame)\r\n self.Submit.setGeometry(QtCore.QRect(610, 590, 101, 41))\r\n self.Submit.setObjectName(\"Submit\")\r\n self.Status = QtWidgets.QLabel(self.frame)\r\n self.Status.setGeometry(QtCore.QRect(5, 650, 1341, 30))\r\n self.Status.setObjectName(\"Status\")\r\n self.ImageLabel = QtWidgets.QLabel(self.frame)\r\n self.ImageLabel.setGeometry(QtCore.QRect(960, 220, 91, 111))\r\n self.ImageLabel.setText(\"\")\r\n self.ImageLabel.setObjectName(\"ImageLabel\")\r\n self.SigLabel = QtWidgets.QLabel(self.frame)\r\n self.SigLabel.setGeometry(QtCore.QRect(950, 400, 121, 41))\r\n self.SigLabel.setText(\"\")\r\n self.SigLabel.setObjectName(\"SigLabel\")\r\n self.Name = QtWidgets.QLabel(self.frame)\r\n self.Name.setGeometry(QtCore.QRect(90, 80, 71, 21))\r\n self.Name.setObjectName(\"Name\")\r\n self.Address1 = QtWidgets.QLabel(self.frame)\r\n self.Address1.setGeometry(QtCore.QRect(90, 240, 91, 16))\r\n self.Address1.setObjectName(\"Address1\")\r\n self.Address2 = QtWidgets.QLabel(self.frame)\r\n self.Address2.setGeometry(QtCore.QRect(90, 360, 81, 20))\r\n self.Address2.setObjectName(\"Address2\")\r\n self.address1Edit = QtWidgets.QLineEdit(self.frame)\r\n self.address1Edit.setGeometry(QtCore.QRect(280, 220, 581, 71))\r\n self.address1Edit.setText(\"\")\r\n self.address1Edit.setObjectName(\"address1Edit\")\r\n self.address2Edit = QtWidgets.QLineEdit(self.frame)\r\n self.address2Edit.setGeometry(QtCore.QRect(280, 330, 581, 71))\r\n self.address2Edit.setObjectName(\"address2Edit\")\r\n self.Phone = QtWidgets.QLabel(self.frame)\r\n 
self.Phone.setGeometry(QtCore.QRect(90, 460, 91, 16))\r\n        self.Phone.setObjectName(\"Phone\")\r\n        self.phone1Edit = QtWidgets.QLineEdit(self.frame)\r\n        self.phone1Edit.setGeometry(QtCore.QRect(280, 450, 121, 31))\r\n        self.phone1Edit.setObjectName(\"phone1Edit\")\r\n        self.phone2Edit = QtWidgets.QLineEdit(self.frame)\r\n        self.phone2Edit.setGeometry(QtCore.QRect(420, 450, 131, 31))\r\n        self.phone2Edit.setObjectName(\"phone2Edit\")\r\n        self.phone3Edit = QtWidgets.QLineEdit(self.frame)\r\n        self.phone3Edit.setGeometry(QtCore.QRect(570, 450, 141, 31))\r\n        self.phone3Edit.setObjectName(\"phone3Edit\")\r\n        self.DOJ = QtWidgets.QLabel(self.frame)\r\n        self.DOJ.setGeometry(QtCore.QRect(810, 150, 121, 21))\r\n        self.DOJ.setObjectName(\"DOJ\")\r\n        self.label = QtWidgets.QLabel(self.frame)\r\n        self.label.setGeometry(QtCore.QRect(280, 120, 261, 16))\r\n        self.label.setStyleSheet(\"color:brown;\\n\"\r\n\"font-size:10px;\")\r\n        self.label.setObjectName(\"label\")\r\n        self.label_2 = QtWidgets.QLabel(self.frame)\r\n        self.label_2.setGeometry(QtCore.QRect(280, 300, 361, 16))\r\n        self.label_2.setStyleSheet(\"color:brown;\\n\"\r\n\"font-size:10px;\")\r\n        self.label_2.setObjectName(\"label_2\")\r\n        self.label_3 = QtWidgets.QLabel(self.frame)\r\n        self.label_3.setGeometry(QtCore.QRect(280, 490, 271, 16))\r\n        self.label_3.setStyleSheet(\"color:brown;\\n\"\r\n\"font-size:10px;\")\r\n        self.label_3.setObjectName(\"label_3\")\r\n        self.label_4 = QtWidgets.QLabel(self.frame)\r\n        self.label_4.setGeometry(QtCore.QRect(1170, 260, 71, 16))\r\n        self.label_4.setStyleSheet(\"color:brown;\\n\"\r\n\"font-size:10px;\")\r\n        self.label_4.setObjectName(\"label_4\")\r\n        self.label_5 = QtWidgets.QLabel(self.frame)\r\n        self.label_5.setGeometry(QtCore.QRect(1180, 440, 71, 20))\r\n        self.label_5.setStyleSheet(\"color:brown;\\n\"\r\n\"font-size:10px;\\n\"\r\n\"\")\r\n        self.label_5.setObjectName(\"label_5\")\r\n        self.horizontalLayout.addWidget(self.frame)\r\n        MainWindow.setCentralWidget(self.centralwidget)\r\n        self.MainWindow=MainWindow\r\n        self.retranslateUi(MainWindow)\r\n        QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n    def retranslateUi(self, MainWindow):\r\n        _translate = QtCore.QCoreApplication.translate\r\n        MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Add Employee\"))\r\n        self.NameEdit.setPlaceholderText(_translate(\"MainWindow\", \"Enter the name \"))\r\n        self.DOB.setText(_translate(\"MainWindow\", \"Date of Birth *\"))\r\n        self.DOBEdit.setDisplayFormat(_translate(\"MainWindow\", \"yyyy-MM-dd\"))\r\n        self.DOJEdit.setDisplayFormat(_translate(\"MainWindow\", \"yyyy-MM-dd\"))\r\n        self.ImageButton.setText(_translate(\"MainWindow\", \"Add Image\"))\r\n        self.SignatureButton.setText(_translate(\"MainWindow\", \"Add Signature\"))\r\n        self.DepartmentEdit.setItemText(0, _translate(\"MainWindow\", \"Select Department\"))\r\n        self.DepartmentEdit.setItemText(1, _translate(\"MainWindow\", \"Information Technology\"))\r\n        self.DepartmentEdit.setItemText(2, _translate(\"MainWindow\", \"Mechanical Department\"))\r\n        self.DepartmentEdit.setItemText(3, _translate(\"MainWindow\", \"Electrical Department\"))\r\n        self.Department.setText(_translate(\"MainWindow\", \"Select Department *\"))\r\n        self.Submit.setText(_translate(\"MainWindow\", \"Submit\"))\r\n        self.Status.setText(_translate(\"MainWindow\", \"\"))\r\n        self.Name.setText(_translate(\"MainWindow\", \"Name *\"))\r\n        self.Address1.setText(_translate(\"MainWindow\", \"Address 1 *\"))\r\n        self.Address2.setText(_translate(\"MainWindow\", \"Address 2\"))\r\n        self.address1Edit.setPlaceholderText(_translate(\"MainWindow\", \"Add address\"))\r\n        self.address2Edit.setPlaceholderText(_translate(\"MainWindow\", \"Add address 2 (optional)\"))\r\n        self.phone2Edit.setPlaceholderText(_translate(\"MainWindow\",\"(optional)\"))\r\n        self.phone3Edit.setPlaceholderText(_translate(\"MainWindow\",\"(optional)\"))\r\n        self.Phone.setText(_translate(\"MainWindow\", \"Phone No. *\"))\r\n        self.DOJ.setText(_translate(\"MainWindow\", \"Date of Joining*\"))\r\n        self.label.setText(_translate(\"MainWindow\", \"Name should be at least 3 characters long\"))\r\n        self.label_2.setText(_translate(\"MainWindow\", \"Address should be at least 15 characters long\"))\r\n        self.label_3.setText(_translate(\"MainWindow\", \"A ten-digit phone number is required\"))\r\n        self.label_4.setText(_translate(\"MainWindow\", \"Required\"))\r\n        self.label_5.setText(_translate(\"MainWindow\", \"Required\"))\r\n        self.photo=''\r\n        self.sig=''\r\n        self.NameEdit.setFocus()\r\n        self.Status.hide()\r\n\r\n        #click events\r\n        self.ImageButton.clicked.connect(self.fileExplorer)\r\n        self.SignatureButton.clicked.connect(self.fileExplorerSig)\r\n        self.Submit.clicked.connect(self.onSubmit)\r\n\r\n        #return key\r\n        self.NameEdit.returnPressed.connect(self.DOBEdit.setFocus)\r\n        self.DOBEdit.editingFinished.connect(self.DOJEdit.setFocus)\r\n        self.DOJEdit.editingFinished.connect(self.address1Edit.setFocus)\r\n        self.address1Edit.returnPressed.connect(self.address2Edit.setFocus)\r\n        self.address2Edit.returnPressed.connect(self.phone1Edit.setFocus)\r\n        self.phone1Edit.returnPressed.connect(self.phone2Edit.setFocus)\r\n        self.phone2Edit.returnPressed.connect(self.phone3Edit.setFocus)\r\n        self.phone3Edit.returnPressed.connect(self.ImageButton.click)\r\n        self.DepartmentEdit.lineEdit().returnPressed.connect(self.onSubmit)\r\n        \r\n        #flags for validation\r\n        self.flags={\r\n            \"name\":False,\r\n            \"address1\":False, \r\n            \"address2\":True, \r\n            \"phone1\":False, \r\n            \"phone2\":True, \r\n            \"phone3\":True, \r\n            \"department\":False, \r\n            \"image\":False,\r\n            \"sig\":False\r\n        }\r\n\r\n        #validation\r\n        self.NameEdit.editingFinished.connect(self.validation_name)\r\n        self.NameEdit.textChanged.connect(self.validation_name)\r\n        self.phone1Edit.editingFinished.connect(self.validation_phone1)\r\n        self.phone1Edit.textChanged.connect(self.validation_phone1)\r\n        self.phone2Edit.editingFinished.connect(self.validation_phone2)\r\n        self.phone2Edit.textChanged.connect(self.validation_phone2)\r\n        self.phone3Edit.editingFinished.connect(self.validation_phone3)\r\n        self.phone3Edit.textChanged.connect(self.validation_phone3)\r\n        self.address1Edit.editingFinished.connect(self.validation_address1)\r\n        self.address1Edit.textChanged.connect(self.validation_address1)\r\n        self.address2Edit.editingFinished.connect(self.validation_address2)\r\n        self.address2Edit.textChanged.connect(self.validation_address2)\r\n        self.DepartmentEdit.editTextChanged.connect(self.validation_department)\r\n\r\n\r\n    def onSubmit(self):\r\n        #if all the flags are true then the form can be submitted\r\n        if(self.photo!=''):\r\n            self.flags['image']=True\r\n        else:\r\n            self.flags['image']=False\r\n        \r\n        if(self.sig!=''):\r\n            self.flags['sig']=True\r\n        else:\r\n            self.flags['sig']=False\r\n        \r\n        count=0\r\n        for flag in self.flags:\r\n            if(self.flags[flag]==True):\r\n                count=count+1\r\n        \r\n        if count==9:\r\n            self.shouldSubmit=True\r\n            self.onChecking()\r\n\r\n        else:\r\n            self.shouldSubmit=False\r\n        \r\n\r\n    def onChecking(self):\r\n        buttonBox = QtWidgets.QMessageBox.question(self, 'Submit details', \"Are you sure you want to submit?\", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)\r\n        if(buttonBox==QtWidgets.QMessageBox.Yes):\r\n            self.connectDatabase()\r\n        elif(buttonBox==QtWidgets.QMessageBox.No):\r\n            self.DepartmentEdit.lineEdit().setFocus()\r\n    \r\n    def generateUniqueId(self,size):\r\n        #generates a unique id\r\n        id=uuid.uuid1()\r\n\r\n        #converting id into int and resizing it to given size\r\n        id=id.int % (10**(size))\r\n\r\n        return id\r\n\r\n\r\n    def connectDatabase(self):\r\n        if(self.shouldSubmit!=False):\r\n            self.Submit.setText('Submitting...')\r\n            cur=self.db.cursor()\r\n\r\n            #generating a unique id for every employee\r\n            id=self.generateUniqueId(9)\r\n\r\n            #saving all the texts in small words for ease\r\n            info={\r\n                \"name\":self.NameEdit.text(),\r\n                \"dob\":str(self.DOBEdit.text()),\r\n                \"doj\":str(self.DOJEdit.text()),\r\n                \"address\":[self.address1Edit.text(),self.address2Edit.text()],\r\n                \"phone\":[self.phone1Edit.text(),self.phone2Edit.text(),self.phone3Edit.text()],\r\n                \"department\":str(self.DepartmentEdit.currentText()),\r\n                \"photo\":self.photo,\r\n                \"sig\":self.sig\r\n            }\r\n\r\n            #queries for inserting data\r\n            employeeQuery=\"insert into employee values(%s,%s,%s,%s)\"\r\n            employeeAddressQuery=\"insert into employeeAddress values(%s,%s)\"\r\n            employeeTelephoneQuery=\"insert into employeeTelephone values(%s,%s)\"\r\n            employeePhotoquery = \"Insert into employeePhoto values(%s,%s)\"\r\n            employeeSigquery = \"Insert into employeeSig values(%s,%s)\"\r\n            employeeDepartmentQuery=\"insert into employeeDepartment values(%s,%s)\"\r\n\r\n            #inserting data\r\n            \r\n            #adding name, date of birth, date of joining\r\n            try:\r\n                cur.execute(employeeQuery,(id,info['dob'],info['name'],info['doj']))\r\n                self.db.commit()\r\n\r\n                #adding addresses \r\n                for addr in info['address']:\r\n                    if(addr!=''):\r\n                        cur.execute(employeeAddressQuery,(id,addr))\r\n                        self.db.commit()\r\n\r\n                #adding phone numbers\r\n                for num in info['phone']:\r\n                    if(num!=''):\r\n                        cur.execute(employeeTelephoneQuery,(id,int(num)))\r\n                        self.db.commit()\r\n\r\n                #adding department\r\n                cur.execute(employeeDepartmentQuery,(id,info['department']))\r\n                self.db.commit()\r\n\r\n                #adding photo\r\n                cur.execute(employeePhotoquery,(id,info['photo']))\r\n                self.db.commit()\r\n\r\n                #adding signature\r\n                cur.execute(employeeSigquery,(id,info['sig']))\r\n                self.db.commit()\r\n\r\n            except Error as e:\r\n                self.Status.setStyleSheet('background:rgb(212,115,70)')\r\n                self.Status.show()\r\n                if e.errno==1062:\r\n                    self.Status.setText('Duplicate Id error : Try Submitting again')\r\n                \r\n                elif e.errno==1146:\r\n                    self.Status.setText('Problem with the Database (table does not exist)')\r\n\r\n                elif e.errno==1054:\r\n                    self.Status.setText('Problem with the Database (column does not exist)')\r\n\r\n                #the insert failed, so skip the success message below\r\n                self.Submit.setText('Submit')\r\n                return\r\n\r\n\r\n            #GUI\r\n            self.Status.setStyleSheet(\r\n                \"background:rgb(15, 73, 61);\\n\"\r\n                \"color:white;\\n\"\r\n                \"padding:5px;\\n\"\r\n                \"font-size:12px;\\n\"\r\n                \"font-weight:bold\")\r\n            self.Status.show()\r\n            self.Status.setText(\"New Employee added successfully\")\r\n            self.Submit.setText('Submitted')\r\n            messageText=\"UserId : {} (Note for future references)\".format(id) \r\n            messageBox = QtWidgets.QMessageBox.question(self, 'Info', messageText, QtWidgets.QMessageBox.Ok , QtWidgets.QMessageBox.Ok)\r\n            \r\n            if(messageBox==QtWidgets.QMessageBox.Ok):\r\n                self.clearData()\r\n\r\n        else:\r\n            self.Status.show()\r\n            print('form is incomplete')\r\n            self.Status.setStyleSheet(\r\n                \"background:rgb(212,115,70);\\n\"\r\n                \"color:black;\\n\"\r\n                \"padding:5px;\\n\"\r\n                \"font-size:12px;\\n\")\r\n            self.Status.setText(\"Form is incomplete\")\r\n\r\n    def clearData(self):\r\n        style='background:rgb(255, 255, 217)'\r\n        self.NameEdit.setText('')\r\n        self.NameEdit.setStyleSheet(style)\r\n        self.DOBEdit.setDate(self.minimumDateDob)\r\n        self.DOJEdit.setDate(self.minimumDateDoj)\r\n        self.address1Edit.setText('')\r\n        self.address1Edit.setStyleSheet(style)\r\n        self.address2Edit.setText('')\r\n        self.phone1Edit.setText('')\r\n        self.phone1Edit.setStyleSheet(style)\r\n        self.phone2Edit.setText('')\r\n        self.phone3Edit.setText('')\r\n        self.DepartmentEdit.setCurrentIndex(0)\r\n        self.DepartmentEdit.setStyleSheet(style)\r\n        self.ImageLabel.setPixmap(QtGui.QPixmap(''))\r\n        self.ImageButton.setStyleSheet(style)\r\n        self.SigLabel.setPixmap(QtGui.QPixmap(''))\r\n        self.SignatureButton.setStyleSheet(style)\r\n        self.Status.setText('')\r\n        self.Status.hide()\r\n        self.NameEdit.setFocus()\r\n        self.Submit.setText('Submit')\r\n\r\n\r\n    #adding image\r\n    def fileExplorer(self):\r\n        name=QtWidgets.QFileDialog.getOpenFileName(self,'Open file','c\\\\','Image files (*.jpg *.png *.jpeg)')\r\n        if name[0]!='':\r\n            imagePath=name[0]\r\n            self.photo=u.read_file(imagePath)\r\n            \r\n            #GUI\r\n            pixmap=QtGui.QPixmap(imagePath)\r\n            pixmap=pixmap.scaled(100,200,QtCore.Qt.KeepAspectRatio)\r\n            self.ImageLabel.setPixmap(pixmap)\r\n            self.ImageLabel.setScaledContents(True)\r\n            self.fileExplorerSig()\r\n\r\n\r\n    #adding signature\r\n    def fileExplorerSig(self):\r\n        name=QtWidgets.QFileDialog.getOpenFileName(self,'Open file','c\\\\','Image files (*.jpg)')\r\n        if name[0]!='':\r\n            imagePath=name[0]\r\n            self.sig=u.read_file(imagePath)\r\n            \r\n            #GUI\r\n            pixmap=QtGui.QPixmap(imagePath)\r\n            pixmap=pixmap.scaled(200, 100, QtCore.Qt.KeepAspectRatio)\r\n            self.SigLabel.setPixmap(pixmap)\r\n            self.SigLabel.setScaledContents(True)\r\n            self.DepartmentEdit.setFocus()\r\n\r\n\r\n    #setting up color for onchange validation\r\n    def validation_color(self,result,name):\r\n        if(result[0]==2): \r\n            name.setStyleSheet(\"background:rgb(255, 255, 217)\")\r\n        else:\r\n            name.setStyleSheet('background:rgb(212, 115, 70)')\r\n\r\n\r\n    #validation for name\r\n    def validation_name(self):\r\n        regName=QRegExp(\"\\w{3,}\\s?\\w*\")\r\n        input_validator = QRegExpValidator(regName, self.NameEdit)\r\n        result=input_validator.validate(self.NameEdit.text(),0)\r\n        self.NameEdit.setValidator(input_validator)\r\n        self.validation_color(result,self.NameEdit)\r\n        if(result[0]==2):\r\n            self.flags['name']=True\r\n        else:\r\n            self.flags['name']=False\r\n\r\n\r\n    #validation for addresses\r\n    def validation_address1(self):\r\n        #address1edit; '-' goes last inside the character class so it is read as a literal dash\r\n        regAddress=QRegExp(\"[\\w\\s.,:-]{15,120}\")\r\n        input_validator = QRegExpValidator(regAddress, self.address1Edit)\r\n        result=input_validator.validate(self.address1Edit.text(),0)\r\n        self.address1Edit.setValidator(input_validator)\r\n        if(result[0]==2):\r\n            self.flags['address1']=True\r\n        else:\r\n            self.flags['address1']=False\r\n\r\n        #colors\r\n        self.validation_color(result,self.address1Edit)\r\n\r\n\r\n    def validation_address2(self):\r\n        #address2Edit\r\n        regAddress=QRegExp(\"([\\w\\s.,:-]{15,120}|\\w{0})\")\r\n        input_validator=QRegExpValidator(regAddress,self.address2Edit)\r\n        result=input_validator.validate(self.address2Edit.text(),0)\r\n        self.address2Edit.setValidator(input_validator)\r\n\r\n        #colors\r\n        self.validation_color(result,self.address2Edit)\r\n        if(result[0]==2):\r\n            self.flags['address2']=True\r\n        else:\r\n            self.flags['address2']=False\r\n\r\n    #validation for phones\r\n    def 
validation_phone1(self):\r\n #Phone\r\n regPhone=QRegExp(\"\\d{10}\")\r\n input_validator=QRegExpValidator(regPhone,self.phone1Edit)\r\n result=input_validator.validate(self.phone1Edit.text(),0)\r\n self.phone1Edit.setValidator(input_validator)\r\n self.validation_color(result,self.phone1Edit)\r\n if(result[0]==2):\r\n self.flags['phone1']=True\r\n else:\r\n self.flags['phone1']=False\r\n\r\n\r\n def validation_phone2(self):\r\n regPhone=QRegExp('(\\d{10}|\\d{0})')\r\n input_validator=QRegExpValidator(regPhone,self.phone2Edit)\r\n result=input_validator.validate(self.phone2Edit.text(),0)\r\n self.phone2Edit.setValidator(input_validator)\r\n self.validation_color(result,self.phone2Edit)\r\n if(result[0]==2):\r\n self.flags['phone2']=True\r\n else:\r\n self.flags['phone2']=False\r\n\r\n def validation_phone3(self):\r\n regPhone=QRegExp('(\\d{10}|\\d{0})')\r\n input_validator=QRegExpValidator(regPhone,self.phone3Edit)\r\n result=input_validator.validate(self.phone3Edit.text(),0)\r\n self.phone3Edit.setValidator(input_validator)\r\n self.validation_color(result,self.phone3Edit)\r\n if(result[0]==2):\r\n self.flags['phone3']=True\r\n else:\r\n self.flags['phone3']=False\r\n\r\n #validation for department\r\n def validation_department(self):\r\n regDepartment=QRegExp(\"^((?!Select Department).)*$\")\r\n input_validator=QRegExpValidator(regDepartment,self.DepartmentEdit)\r\n result=input_validator.validate(self.DepartmentEdit.currentText(),0)\r\n self.DepartmentEdit.setValidator(input_validator)\r\n self.validation_color(result,self.DepartmentEdit)\r\n if(result[0]==2):\r\n self.flags['department']=True\r\n else:\r\n self.flags['department']=False\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"employee management system/additional_files/addTrainee.py","file_name":"addTrainee.py","file_ext":"py","file_size_in_byte":26421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"109476900","text":"import os\nimport utils\nimport time\nimport json\nimport math\nimport string\n\n# ----------------------------------------------\n# This script reads the json file with the merged\n# data (\"02_AllData.json\") and fixes coding issues\n# identified during review of code lists\n# -----------------------------------------------``\n\n\ndef fix_dimension(dimension, code_fix, description_fix):\n if dimension in record.keys():\n\n for k, v in code_fix.items():\n if record[dimension + '_DESC'] == v:\n record[dimension] = k\n\n for k, v in description_fix.items():\n if record[dimension] == k:\n record[dimension + '_DESC'] = v\n\n# Read the data\n\n\ndata = utils.open_json('data/02_AllData.json')\nprint(data[0])\n\nseries_catalog = utils.tsv2dictlist('data/source/SeriesCatalog.txt')\nprint(series_catalog[0])\n\nupdate_SeriesNames = utils.tsv2dictlist('data/source/update_SeriesNames.txt')\n\ncheck = []\n\nfor record in data:\n\n series_desc = record['SERIES_DESC']\n\n # Fix 1: Fix series names:\n\n series_name_fix = utils.select_dict(\n update_SeriesNames, {'SERIES_DESC_OLD': series_desc})\n\n if len(series_name_fix) > 0:\n record['SERIES_DESC'] = update_SeriesNames[0]['SERIES_DESC_NEW']\n\n # Fix 2: Add series code:\n\n series_data = utils.select_dict(\n series_catalog, {'SERIES_DESC': series_desc})\n\n if len(series_data) > 1:\n print(\n f\"The series 
{record['SERIES_DESC']} appears multiple times in the series catalog\")\n break\n elif len(series_data) == 0:\n check.append(record['SERIES_DESC'])\n check = list(set(check))\n else:\n record['SERIES'] = series_data[0]['SERIES']\n\n # Fix 3: Fix AGE coding:\n\n code_fix = dict()\n\n description_fix = {\n '_T': 'All age ranges or no breakdown by age',\n 'Y_GE15': '15 years old and over',\n 'Y_GE65': '65 years old and over',\n 'Y_GE80': '80 old and over',\n 'Y15T25': '15 to 25 years',\n 'Y50T54': '50 to 54 years old',\n 'Y55T59': '55 to 59 years old',\n 'Y60T64': '60 to 64 years old',\n 'Y65T69': '65 to 69 years old',\n 'Y70T74': '70 to 74 years old',\n 'Y75T79': '75 to 79 years old',\n 'Y80T84': '80 to 84 years old',\n 'Y85T89': '85 to 89 years old'\n }\n\n fix_dimension('AGE', code_fix, description_fix)\n\n # Fix 4: Fix COD coding:\n\n code_fix = {\n '800': 'Diabetes',\n }\n\n description_fix = {\n '800': 'Diabetes mellitus'\n }\n\n fix_dimension('COD', code_fix, description_fix)\n\n # Fix 5: Fix EDUCATION_LEV coding:\n\n code_fix = dict()\n\n description_fix = {\n 'ISCED11_1':\t'Primary education',\n 'ISCED11_2':\t'Lower secondary education',\n 'ISCED11_3':\t'Upper secondary education',\n 'ISCED11A_0_G23': 'Some primary education, grades 2 or 3'\n }\n\n fix_dimension('EDUCATION_LEV', code_fix, description_fix)\n\n # Fix 6: Fix ETHNICITY coding:\n\n code_fix = {\n 'WH': 'White',\n 'BL_BR': 'Black or brown'\n }\n\n description_fix = {\n '_T': 'Total or no breakdown by ethnicity'\n }\n\n fix_dimension('ETHNICITY', code_fix, description_fix)\n\n # Fix 7: Fix FREQ coding:\n\n if 'FREQ' in record.keys():\n\n if record['FREQ'] == 'S':\n record['FREQ'] = 'A'\n record['FREQ_DESC'] = 'Annual'\n\n # Fix 8: Fix GEOLEVEL coding:\n\n code_fix = {\n '4'\t: 'National'\n }\n\n description_fix = {\n '4': 'Country or Area',\n '5': 'Sub-national'\n }\n\n fix_dimension('GEOLEVEL', code_fix, description_fix)\n\n # Fix 9: Fix HOUSEHOLD_TYPE coding:\n\n code_fix = {\n '_T': 'Total'\n }\n\n description_fix = {\n '2': 'Couples without children',\n '3': 'Couples with children',\n '4': 'Lone parents'\n }\n\n fix_dimension('HOUSEHOLD_TYPE', code_fix, description_fix)\n\n # Fix 10: Fix INCOME_WEALTH_QUANTILE coding:\n\n code_fix = dict()\n\n description_fix = {\n 'Q1': 'Quintile 1 (poorest)',\n 'Q2': 'Quintile 2 (second poorest)',\n 'Q3': 'Quintile 3 (middle)',\n 'Q4': 'Quintile 4 (second richest)',\n 'Q5': 'Quintile 5 (richest)'\n }\n\n fix_dimension('INCOME_WEALTH_QUANTILE', code_fix, description_fix)\n\n # Fix 11: Fix MARITAL_STATUS coding:\n\n code_fix = {\n '_T': 'Total'\n }\n\n description_fix = dict()\n\n fix_dimension('MARITAL_STATUS', code_fix, description_fix)\n\n # Fix 12: Fix MINISTER_PORTFOLIO coding:\n\n code_fix = dict()\n\n description_fix = {\n '7': 'Housing and Urban Affairs',\n '20': 'Justice'\n }\n\n fix_dimension('MINISTER_PORTFOLIO', code_fix, description_fix)\n\n # Fix 13: Fix NATURE coding:\n\n code_fix = dict()\n\n description_fix = {\n 'M': 'Modeled'\n }\n\n fix_dimension('NATURE', code_fix, description_fix)\n\n # Fix 14: Fix OCCUPATION coding:\n\n code_fix = {\n 'ISCO08_101': 'Commissioned armed forces officers',\n 'ISCO08_102': 'Non-commissioned armed forces officers',\n 'ISCO08_103': 'Armed forces occupations, other ranks'\n }\n\n description_fix = dict()\n\n fix_dimension('OCCUPATION', code_fix, description_fix)\n\n # Fix 15: Fix REF_AREA coding:\n\n code_fix = dict()\n\n description_fix = {\n \"1\": \"World\",\n \"2\": \"Africa\",\n \"4\": \"Afghanistan\",\n \"5\": \"South America\",\n 
\"8\": \"Albania\",\n \"9\": \"Oceania\",\n \"11\": \"Western Africa\",\n \"12\": \"Algeria\",\n \"13\": \"Central America\",\n \"14\": \"Eastern Africa\",\n \"15\": \"Northern Africa\",\n \"16\": \"American Samoa\",\n \"17\": \"Middle Africa\",\n \"18\": \"Southern Africa\",\n \"19\": \"Americas\",\n \"20\": \"Andorra\",\n \"21\": \"Northern America\",\n \"24\": \"Angola\",\n \"28\": \"Antigua and Barbuda\",\n \"29\": \"Caribbean\",\n \"30\": \"Eastern Asia\",\n \"31\": \"Azerbaijan\",\n \"32\": \"Argentina\",\n \"34\": \"Southern Asia\",\n \"35\": \"South-Eastern Asia\",\n \"36\": \"Australia\",\n \"39\": \"Southern Europe\",\n \"40\": \"Austria\",\n \"44\": \"Bahamas\",\n \"48\": \"Bahrain\",\n \"50\": \"Bangladesh\",\n \"51\": \"Armenia\",\n \"52\": \"Barbados\",\n \"53\": \"Australia and New Zealand\",\n \"54\": \"Melanesia\",\n \"56\": \"Belgium\",\n \"57\": \"Micronesia\",\n \"60\": \"Bermuda\",\n \"61\": \"Polynesia\",\n \"62\": \"Central and Southern Asia\",\n \"64\": \"Bhutan\",\n \"68\": \"Bolivia (Plurinational State of)\",\n \"70\": \"Bosnia and Herzegovina\",\n \"72\": \"Botswana\",\n \"76\": \"Brazil\",\n \"84\": \"Belize\",\n \"90\": \"Solomon Islands\",\n \"92\": \"British Virgin Islands\",\n \"96\": \"Brunei Darussalam\",\n \"100\": \"Bulgaria\",\n \"104\": \"Myanmar\",\n \"108\": \"Burundi\",\n \"112\": \"Belarus\",\n \"116\": \"Cambodia\",\n \"120\": \"Cameroon\",\n \"124\": \"Canada\",\n \"132\": \"Cabo Verde\",\n \"136\": \"Cayman Islands\",\n \"140\": \"Central African Republic\",\n \"142\": \"Asia\",\n \"143\": \"Central Asia\",\n \"144\": \"Sri Lanka\",\n \"145\": \"Western Asia\",\n \"148\": \"Chad\",\n \"150\": \"Europe\",\n \"151\": \"Eastern Europe\",\n \"152\": \"Chile\",\n \"154\": \"Northern Europe\",\n \"155\": \"Western Europe\",\n \"156\": \"China\",\n \"158\": \"China, Taiwan Province of China\",\n \"170\": \"Colombia\",\n \"174\": \"Comoros\",\n \"175\": \"Mayotte\",\n \"178\": \"Congo\",\n \"180\": \"Democratic Republic of the Congo\",\n \"184\": \"Cook Islands\",\n \"188\": \"Costa Rica\",\n \"191\": \"Croatia\",\n \"192\": \"Cuba\",\n \"196\": \"Cyprus\",\n \"199\": \"Least Developed Countries (LDCs)\",\n \"202\": \"Sub-Saharan Africa\",\n \"203\": \"Czechia\",\n \"204\": \"Benin\",\n \"208\": \"Denmark\",\n \"212\": \"Dominica\",\n \"214\": \"Dominican Republic\",\n \"218\": \"Ecuador\",\n \"222\": \"El Salvador\",\n \"226\": \"Equatorial Guinea\",\n \"231\": \"Ethiopia\",\n \"232\": \"Eritrea\",\n \"233\": \"Estonia\",\n \"234\": \"Faroe Islands\",\n \"238\": \"Falkland Islands (Malvinas)\",\n \"242\": \"Fiji\",\n \"246\": \"Finland\",\n \"250\": \"France\",\n \"254\": \"French Guiana\",\n \"258\": \"French Polynesia\",\n \"262\": \"Djibouti\",\n \"266\": \"Gabon\",\n \"268\": \"Georgia\",\n \"270\": \"Gambia\",\n \"275\": \"State of Palestine\",\n \"276\": \"Germany\",\n \"288\": \"Ghana\",\n \"292\": \"Gibraltar\",\n \"296\": \"Kiribati\",\n \"300\": \"Greece\",\n \"304\": \"Greenland\",\n \"308\": \"Grenada\",\n \"312\": \"Guadeloupe\",\n \"316\": \"Guam\",\n \"320\": \"Guatemala\",\n \"324\": \"Guinea\",\n \"328\": \"Guyana\",\n \"332\": \"Haiti\",\n \"336\": \"Holy See\",\n \"340\": \"Honduras\",\n \"344\": \"China, Hong Kong Special Administrative Region\",\n \"348\": \"Hungary\",\n \"352\": \"Iceland\",\n \"356\": \"India\",\n \"360\": \"Indonesia\",\n \"364\": \"Iran (Islamic Republic of)\",\n \"368\": \"Iraq\",\n \"372\": \"Ireland\",\n \"376\": \"Israel\",\n \"380\": \"Italy\",\n \"384\": \"Côte d'Ivoire\",\n \"388\": \"Jamaica\",\n 
\"392\": \"Japan\",\n \"398\": \"Kazakhstan\",\n \"400\": \"Jordan\",\n \"404\": \"Kenya\",\n \"408\": \"Democratic People's Republic of Korea\",\n \"410\": \"Republic of Korea\",\n \"414\": \"Kuwait\",\n \"417\": \"Kyrgyzstan\",\n \"418\": \"Lao People's Democratic Republic\",\n \"419\": \"Latin America and the Caribbean\",\n \"420\": \"Latin America\",\n \"422\": \"Lebanon\",\n \"426\": \"Lesotho\",\n \"428\": \"Latvia\",\n \"430\": \"Liberia\",\n \"432\": \"Landlocked developing countries (LLDCs)\",\n \"434\": \"Libya\",\n \"438\": \"Liechtenstein\",\n \"440\": \"Lithuania\",\n \"442\": \"Luxembourg\",\n \"446\": \"China, Macao Special Administrative Region\",\n \"450\": \"Madagascar\",\n \"454\": \"Malawi\",\n \"458\": \"Malaysia\",\n \"462\": \"Maldives\",\n \"466\": \"Mali\",\n \"470\": \"Malta\",\n \"474\": \"Martinique\",\n \"478\": \"Mauritania\",\n \"480\": \"Mauritius\",\n \"484\": \"Mexico\",\n \"492\": \"Monaco\",\n \"496\": \"Mongolia\",\n \"498\": \"Republic of Moldova\",\n \"499\": \"Montenegro\",\n \"500\": \"Montserrat\",\n \"504\": \"Morocco\",\n \"508\": \"Mozambique\",\n \"512\": \"Oman\",\n \"513\": \"Europe and Northern America\",\n \"514\": \"​Developed\",\n \"515\": \"​Developing\",\n \"516\": \"Namibia\",\n \"520\": \"Nauru\",\n \"524\": \"Nepal\",\n \"528\": \"Netherlands\",\n \"531\": \"Curaçao\",\n \"533\": \"Aruba\",\n \"534\": \"Sint Maarten (Dutch part)\",\n \"540\": \"New Caledonia\",\n \"543\": \"Oceania (exc. Australia and New Zealand)\",\n \"548\": \"Vanuatu\",\n \"554\": \"New Zealand\",\n \"558\": \"Nicaragua\",\n \"562\": \"Niger\",\n \"566\": \"Nigeria\",\n \"570\": \"Niue\",\n \"578\": \"Norway\",\n \"580\": \"Northern Mariana Islands\",\n \"583\": \"Micronesia (Federated States of)\",\n \"584\": \"Marshall Islands\",\n \"585\": \"Palau\",\n \"586\": \"Pakistan\",\n \"591\": \"Panama\",\n \"598\": \"Papua New Guinea\",\n \"600\": \"Paraguay\",\n \"604\": \"Peru\",\n \"608\": \"Philippines\",\n \"616\": \"Poland\",\n \"620\": \"Portugal\",\n \"624\": \"Guinea-Bissau\",\n \"626\": \"Timor-Leste\",\n \"630\": \"Puerto Rico\",\n \"634\": \"Qatar\",\n \"638\": \"Réunion\",\n \"642\": \"Romania\",\n \"643\": \"Russian Federation\",\n \"646\": \"Rwanda\",\n \"654\": \"Saint Helena\",\n \"659\": \"Saint Kitts and Nevis\",\n \"660\": \"Anguilla\",\n \"662\": \"Saint Lucia\",\n \"666\": \"Saint Pierre and Miquelon\",\n \"670\": \"Saint Vincent and the Grenadines\",\n \"674\": \"San Marino\",\n \"678\": \"Sao Tome and Principe\",\n \"682\": \"Saudi Arabia\",\n \"686\": \"Senegal\",\n \"688\": \"Serbia\",\n \"690\": \"Seychelles\",\n \"694\": \"Sierra Leone\",\n \"702\": \"Singapore\",\n \"703\": \"Slovakia\",\n \"704\": \"Viet Nam\",\n \"705\": \"Slovenia\",\n \"706\": \"Somalia\",\n \"710\": \"South Africa\",\n \"716\": \"Zimbabwe\",\n \"722\": \"Small island developing States (SIDS)\",\n \"724\": \"Spain\",\n \"728\": \"South Sudan\",\n \"729\": \"Sudan\",\n \"740\": \"Suriname\",\n \"747\": \"Northern Africa and Western Asia\",\n \"748\": \"Eswatini\",\n \"752\": \"Sweden\",\n \"753\": \"Eastern and South-Eastern Asia\",\n \"756\": \"Switzerland\",\n \"760\": \"Syrian Arab Republic\",\n \"762\": \"Tajikistan\",\n \"764\": \"Thailand\",\n \"768\": \"Togo\",\n \"772\": \"Tokelau\",\n \"776\": \"Tonga\",\n \"780\": \"Trinidad and Tobago\",\n \"784\": \"United Arab Emirates\",\n \"788\": \"Tunisia\",\n \"792\": \"Turkey\",\n \"795\": \"Turkmenistan\",\n \"796\": \"Turks and Caicos Islands\",\n \"798\": \"Tuvalu\",\n \"800\": \"Uganda\",\n \"804\": 
\"Ukraine\",\n \"807\": \"North Macedonia\",\n \"818\": \"Egypt\",\n \"826\": \"United Kingdom\",\n \"830\": \"Channel Islands\",\n \"833\": \"Isle of Man\",\n \"834\": \"United Republic of Tanzania\",\n \"840\": \"United States of America\",\n \"850\": \"United States Virgin Islands\",\n \"854\": \"Burkina Faso\",\n \"858\": \"Uruguay\",\n \"860\": \"Uzbekistan\",\n \"862\": \"Venezuela (Bolivarian Republic of)\",\n \"876\": \"Wallis and Futuna Islands\",\n \"882\": \"Samoa\",\n \"887\": \"Yemen\",\n \"894\": \"Zambia\",\n \"910\": \"High income economies (WB)\",\n \"911\": \"Low income economies (WB)\",\n \"912\": \"Lower middle economies (WB)\",\n \"914\": \"Upper middle economies (WB)\"\n }\n\n fix_dimension('REF_AREA', code_fix, description_fix)\n\n # Fix 16: Fix SEX coding:\n\n code_fix = {\n 'M': 'Male',\n 'F': 'Female'\n }\n\n description_fix = {\n '_T': 'Both sexes or no breakdown by sex',\n 'M': 'Male',\n 'F': 'Female'\n }\n\n fix_dimension('SEX', code_fix, description_fix)\n\n # Fix 17: Fix UNIT_MULT coding:\n\n code_fix = {\n '0': 'Units'\n }\n\n description_fix = {\n '0': 'Units'\n }\n\n fix_dimension('UNIT_MULT', code_fix, description_fix)\n\n\nwith open(\"data/02_AllData.json\", \"w\") as write_file:\n json.dump(data, write_file, indent=4)\n\nprint(\"The following series are not in the series catalog:\")\nprint(check)\n","sub_path":"scripts/deadwood/script01c_fixCodes.py","file_name":"script01c_fixCodes.py","file_ext":"py","file_size_in_byte":14416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"568280630","text":"from tkinter import *\n\nroot = Tk()\n\n#create text\n#wrap = WORD позволяет переносить целое слово со строки на строку а не по буквам\ntext = Text(width = 30, height = 10, bg = \"darkgreen\", fg = 'white', wrap = WORD)\ntext.pack(side = LEFT)\n\n#create scrollbar\n#с помощью command присваевается прокрутка теста по оси y\n#fill = Y, опускает scrollbar до конца страницы\nscroll = Scrollbar(command = text.yview)\nscroll.pack(side = LEFT, fill = Y)\n\n#В свою очередь текстовому полю опцией yscrollcommand устанавливается ранее созданный скроллер – scroll.set\ntext.config(yscrollcommand = scroll.set)\n\nroot.mainloop()","sub_path":"tkinter/textAndScrollbar.py","file_name":"textAndScrollbar.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"492599231","text":"# Global imports\nimport torch\nimport torch.nn as nn\n\n\nclass Angle_Loss(nn.Module):\n \"\"\" Computes average L1 Loss only where beetle is in ground truth data. 
\"\"\"\n\n def __init__(self):\n super(Angle_Loss, self).__init__()\n\n def forward(self, input, target):\n tmp = torch.clamp(target, 0, 1)\n\n # When beetles orientation in ground truth data is 0 degrees, you can't know where the beetle is located\n if torch.equal(tmp, torch.zeros_like(tmp)):\n output = torch.sum(input) / torch.numel(input)\n else:\n nonZero = torch.nonzero(target)\n output = torch.sum(torch.abs(target - tmp * input)) / torch.numel(nonZero)\n return output\n","sub_path":"src/framework/loss/custom_angle_loss.py","file_name":"custom_angle_loss.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"367514389","text":"import got, codecs\nfrom pymongo import MongoClient\n#import numpy\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\nclient = MongoClient('localhost', 27017)\ndb = client['twitter_db']\ncollection = db['curtweets']\nnow = datetime.now()\nd = datetime.today() - timedelta(days=1)\ntweetCriteria = got.manager.TweetCriteria().setSince(str(d)[:10]).setUntil(str(now)[:10]).setMaxTweets(6000).setQuerySearch('india flood OR earthquake OR rains OR landslide')\n#tweetCriteria = got.manager.TweetCriteria().setSince(\"2017-08-27\").setUntil(\"2017-08-30\").setMaxTweets(6000).setQuerySearch('flood india OR earthquake OR rains OR landslide')\ndef streamTweets(tweets):\n for t in tweets:\n obj = {\"user\": t.username, \"retweets\": t.retweets, \"favorites\":\n t.favorites, \"text\":t.text,\"geo\": t.geo,\"mentions\":\n t.mentions, \"hashtags\": t.hashtags,\"id\": t.id,\n \"permalink\": t.permalink,}\n tweetind = collection.insert_one(obj).inserted_id\ngot.manager.TweetManager.getTweets(tweetCriteria, streamTweets)\n","sub_path":"corpusmngDB.py","file_name":"corpusmngDB.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"169703534","text":"from doubly_linked_list import DoublyLinkedList\n\n\nclass RingBuffer:\n def __init__(self, capacity):\n self.capacity = capacity\n self.current = None\n self.storage = DoublyLinkedList()\n\n def append(self, item):\n if self.storage.length != self.capacity:\n self.storage.add_to_tail(item)\n else:\n if self.current is None:\n self.storage.remove_from_head()\n self.storage.add_to_head(item)\n self.current = self.storage.head\n else:\n if self.current.next is not None:\n self.current.next.value = item\n self.current = self.current.next\n else:\n self.storage.head.value = item\n self.current = self.storage.head\n \n\n def get(self):\n # Note: This is the only [] allowed\n list_buffer_contents = []\n curr_node = self.storage.head\n for _ in range(self.storage.length):\n list_buffer_contents.append(curr_node.value)\n curr_node = curr_node.next\n\n return list_buffer_contents\n\n# ----------------Stretch Goal-------------------\n\n\nclass ArrayRingBuffer:\n def __init__(self, capacity):\n self.capacity = capacity\n self.current = None\n self.storage = [0]*capacity\n\n def append(self, item):\n if self.current is None:\n self.storage.pop(0)\n self.storage.insert(0, item)\n self.current = 0\n else:\n if self.current <= self.capacity-2:\n self.storage[self.current + 1] = item\n self.current += 1\n else:\n self.storage[0] = item\n self.current = 0\n\n def get(self):\n return 
self.storage\n","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"382776492","text":"# -*- coding: utf-8 -*-\n\nimport codecs, os\n\ndef _parz(msg):\n pz = msg.splitlines()\n mo = dict()\n optz = pz[0].split('/')\n mo.update( dict(zip(optz[::2],optz[1::2])) )\n for i,n in enumerate(('echoarea','date','msgfrom','addr','msgto','subj'),1):\n mo[n] = pz[i]\n mo['msg'] = '\\n'.join(pz[8:])\n mo['date'] = int(mo['date'])\n return mo\n\nf = codecs.open('../newmsg.txt','w','utf-8')\nfor m in open('.newmsg').read().splitlines():\n mo = _parz(codecs.open('msg/%s' % m,'r','utf-8').read())\n buf = m + '\\n' + mo['msgfrom'] + ' (' + str(mo['addr']) + ')\\nmsgto: ' + mo['msgto'] + '\\n' + mo['subj'] + '\\n\\n' + mo['msg']\n f.write('== %s ========================= ' % mo['echoarea'] + buf + '\\n\\n\\n')\nf.close()\n","sub_path":"ii-txt/.py/newmsg.py","file_name":"newmsg.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"210020311","text":"# Time complexity: O(nlog n)\n# Space complexity: O(n)\n\n# Idea is to split the array into two halves sort the two halves while merging them together. Divide and conquer strategy\n# Python program for implementation of MergeSort\ndef mergeSort(arr):\n if len(arr) == 1:\n return arr\n mid = len(arr)//2\n left = mergeSort(arr[:mid])\n right = mergeSort(arr[mid:])\n return merge(left, right)\n\n\ndef merge(left, right):\n left_idx, right_idx = 0, 0\n result = []\n # as long as the indices are within limits, add the minimum elements to the result array and advance respective indices\n while left_idx < len(left) and right_idx < len(right):\n if left[left_idx] < right[right_idx]:\n result.append(left[left_idx])\n left_idx += 1\n else:\n result.append(right[right_idx])\n right_idx += 1\n # if there are any elements left in either left or right array, add the to the result\n if left_idx < len(left):\n result += left[left_idx:]\n if right_idx < len(right):\n result += right[right_idx:]\n return result\n\n# Code to print the list\n\n\ndef printList(arr):\n print(arr)\n\n\n# driver code to test the above code\nif __name__ == '__main__':\n arr = [12, 11, 13, 5, 6, 7]\n print(\"Given array is\", end=\"\\n\")\n printList(arr)\n arr = mergeSort(arr)\n print(\"Sorted array is: \", end=\"\\n\")\n printList(arr)\n","sub_path":"Exercise_4.py","file_name":"Exercise_4.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"552404340","text":"# This file is part of Peach-Py package and is licensed under the Simplified BSD license.\n# See license.rst for the full text of the license.\n\n\nabi = None\ntarget = None\ndebug_level = 0\npackage = None\nassembly_format = \"go\"\ngenerate_assembly = None\nrtl_dump_file = None\nname_mangling = \"${Name}\"\n\n\ndef get_debug_level():\n import peachpy.x86_64.function as function\n if function.active_function is None:\n return debug_level\n else:\n return function.active_function.debug_level\n","sub_path":"peachpy/x86_64/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"319246165","text":"\"\"\"Web URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom apps.general.views import index, index1, index2, index3, index4, nosotros_view, productos_view, servicios_view,contactanos_view\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', index, name = 'index'),\n url(r'^1/$', index1, name = 'index1'),\n url(r'^2/$', index2, name = 'index2'),\n url(r'^3/$', index3, name = 'index3'),\n url(r'^4/$', index4, name = 'index4'),\n url(r'^nosotros/$', nosotros_view, name = 'nosotros'),\n url(r'^productos/$', productos_view, name = 'productos'),\n url(r'^servicios/$', servicios_view, name = 'servicios'),\n url(r'^contactanos/$', contactanos_view, name = 'contactanos'),\n]\n","sub_path":"Web/Web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"122722766","text":"#!/usr/bin/env python3\n\nimport sys\nimport fontforge\n\nfont = fontforge.open(sys.argv[1])\n\n# Rename font\nfont.fontname = font.fontname.replace(\"-\", \"Condensed-\")\nfont.familyname += \" Condensed\"\nfont.fullname += \" Condensed\"\n\n# Condense\nfont.selection.all()\nfont.condenseExtend(0.85, 0)\nfont.round()\n\n# Save\nfont.generate(sys.argv[2])\n","sub_path":"bin/condense.py","file_name":"condense.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"14621070","text":"class Solution:\n \"\"\"\n @param coins: a list of integer\n @param amount: a total amount of money amount\n @return: the fewest number of coins that you need to make up\n \"\"\"\n\n def coinChange(self, coins, amount):\n # write your code here\n dp = [0] + [float('inf')] * amount\n for i in range(1, amount + 1):\n for j in range(len(coins)):\n if coins[j] <= i:\n dp[i] = min(dp[i], dp[i - coins[j]] + 1)\n return dp[amount] if dp[amount] <= amount else -1\n","sub_path":"lintcode/669-coin-change.py","file_name":"669-coin-change.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"205005914","text":"from django.db import models\n\n\nUSER_ROLES = (\n ('ST', 'Student'),\n ('TE', 'Teacher')\n)\n\n\nclass UserRoleField(models.CharField):\n \"\"\"\n A CharField representing the user role\n Can be ST (Student) or TE (Teacher)\n \"\"\"\n def __init__(self, *args, **kwargs):\n kwargs['default'] = 'ST'\n kwargs['max_length'] = 2\n kwargs['choices'] = USER_ROLES\n super(UserRoleField, self).__init__(*args, **kwargs)\n","sub_path":"get_a_room/accounts/my_fields.py","file_name":"my_fields.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"113259377","text":"import prof\nimport subprocess\n\ndef _cmd_ascii(text):\n recipient = prof.get_current_recipient()\n if recipient:\n proc = 
subprocess.Popen(['figlet', '--', text], stdout=subprocess.PIPE)\n ascii_out = proc.communicate()[0].decode('utf-8')\n prof.send_line(u'\\u000A' + ascii_out)\n\ndef prof_init(version, status):\n prof.register_command(\"/ascii\", 1, 1, \"/ascii\", \"ASCIIfy a message\", \"ASCIIfy a message.\", _cmd_ascii)\n","sub_path":"ascii.py","file_name":"ascii.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"18227113","text":"# -*- coding: utf-8 -*-\n# @Author : William\n# @Project : TextGAN-william\n# @FileName : maligan_instructor.py\n# @Time : Created at 2019/10/17\n# @Blog : http://zhiweil.ml/\n# @Description : \n# Copyrights (C) 2018. All Rights Reserved.\n\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nimport config as cfg\nfrom instructor.oracle_data.instructor import BasicInstructor\nfrom models.MaliGAN_D import MaliGAN_D\nfrom models.MaliGAN_G import MaliGAN_G\nfrom utils.data_loader import GenDataIter, DisDataIter\n\n\nclass MaliGANInstructor(BasicInstructor):\n def __init__(self, opt):\n super(MaliGANInstructor, self).__init__(opt)\n\n # generator, discriminator\n self.gen = MaliGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,\n cfg.padding_idx, gpu=cfg.CUDA)\n self.dis = MaliGAN_D(cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)\n self.init_model()\n\n # Optimizer\n self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)\n self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)\n self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)\n\n def _run(self):\n # ===PRE-TRAINING===\n # TRAIN GENERATOR\n if not cfg.gen_pretrain:\n self.log.info('Starting Generator MLE Training...')\n self.pretrain_generator(cfg.MLE_train_epoch)\n if cfg.if_save and not cfg.if_test:\n torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)\n print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))\n\n # ===TRAIN DISCRIMINATOR====\n if not cfg.dis_pretrain:\n self.log.info('Starting Discriminator Training...')\n self.train_discriminator(cfg.d_step, cfg.d_epoch)\n if cfg.if_save and not cfg.if_test:\n torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)\n print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))\n\n # ===ADVERSARIAL TRAINING===\n self.log.info('Starting Adversarial Training...')\n self.log.info('Initial generator: %s' % (self.cal_metrics(fmt_str=True)))\n\n for adv_epoch in range(cfg.ADV_train_epoch):\n self.log.info('-----\\nADV EPOCH %d\\n-----' % adv_epoch)\n self.sig.update()\n if self.sig.adv_sig:\n self.adv_train_generator(cfg.ADV_g_step) # Generator\n self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator\n\n if adv_epoch % cfg.adv_log_step == 0 or adv_epoch == cfg.ADV_train_epoch - 1:\n if cfg.if_save and not cfg.if_test:\n self._save('ADV', adv_epoch)\n else:\n self.log.info('>>> Stop by adv_signal! 
Finishing adversarial training...')\n                break\n\n    def _test(self):\n        print('>>> Begin test...')\n\n        self._run()\n        pass\n\n    def pretrain_generator(self, epochs):\n        \"\"\"\n        Max Likelihood Pre-training for the generator\n        \"\"\"\n        for epoch in range(epochs):\n            self.sig.update()\n            if self.sig.pre_sig:\n                pre_loss = self.train_gen_epoch(self.gen, self.oracle_data.loader, self.mle_criterion, self.gen_opt)\n\n                # ===Test===\n                if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:\n                    self.log.info(\n                        '[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))\n                    if cfg.if_save and not cfg.if_test:\n                        self._save('MLE', epoch)\n            else:\n                self.log.info('>>> Stop by pre signal, skip to adversarial training...')\n                break\n\n    def adv_train_generator(self, g_step):\n        \"\"\"\n        The gen is trained by MLE-like objective.\n        \"\"\"\n        total_g_loss = 0\n        for step in range(g_step):\n            inp, target = GenDataIter.prepare(self.gen.sample(cfg.batch_size, cfg.batch_size), gpu=cfg.CUDA)\n\n            # ===Train===\n            rewards = self.get_mali_reward(target)\n            adv_loss = self.gen.adv_loss(inp, target, rewards)\n            self.optimize(self.gen_adv_opt, adv_loss)\n            total_g_loss += adv_loss.item()\n\n        # ===Test===\n        self.log.info('[ADV-GEN]: g_loss = %.4f, %s' % (total_g_loss, self.cal_metrics(fmt_str=True)))\n\n    def train_discriminator(self, d_step, d_epoch, phase='MLE'):\n        \"\"\"\n        Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).\n        Samples are drawn d_step times, and the discriminator is trained for d_epoch epochs each time.\n        \"\"\"\n        # prepare loader for validate\n        global d_loss, train_acc\n        pos_val = self.oracle.sample(8 * cfg.batch_size, 4 * cfg.batch_size)\n        neg_val = self.gen.sample(8 * cfg.batch_size, 4 * cfg.batch_size)\n        dis_eval_data = DisDataIter(pos_val, neg_val)\n\n        for step in range(d_step):\n            # prepare loader for training\n            pos_samples = self.oracle_samples  # not re-sample the Oracle data\n            neg_samples = self.gen.sample(cfg.samples_num, 4 * cfg.batch_size)\n            dis_data = DisDataIter(pos_samples, neg_samples)\n\n            for epoch in range(d_epoch):\n                # ===Train===\n                d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,\n                                                         self.dis_opt)\n\n            # ===Test===\n            _, eval_acc = self.eval_dis(self.dis, dis_eval_data.loader, self.dis_criterion)\n            self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f, eval_acc = %.4f,' % (\n                phase, step, d_loss, train_acc, eval_acc))\n\n        if cfg.if_save and not cfg.if_test:\n            torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)\n\n    def get_mali_reward(self, samples):\n        rewards = []\n        for _ in range(cfg.rollout_num):\n            dis_out = F.softmax(self.dis(samples), dim=-1)[:, 1]\n            rewards.append(dis_out)\n\n        rewards = torch.mean(torch.stack(rewards, dim=0), dim=0)  # batch_size\n        rewards = torch.div(rewards, 1 - rewards)\n        rewards = torch.div(rewards, torch.sum(rewards))\n        rewards -= torch.mean(rewards)\n        rewards = rewards.unsqueeze(1).expand(samples.size())  # batch_size * seq_len\n\n        return rewards\n","sub_path":"instructor/oracle_data/maligan_instructor.py","file_name":"maligan_instructor.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"145494673","text":"import re\nimport itertools\n\n\ndef is_string(string):\n    return isinstance(string, str)\n\n\nprint(is_string('hello'))  # True\nprint(is_string(['hello']))  # False\nprint(is_string('this is a long sentence'))  # True\nprint(is_string({'a': 2}))  # 
False\n\n\ndef is_only_string(string):\n    return type(string) is str and \\\n        \" \" in string and any(char.isdigit() for char in string)\n\n\nprint(is_only_string('11'))  # False\nprint(is_only_string(['hello']))  # ? Please handle this case!! Should return False\nprint(is_only_string('this is a long sentence'))  # False\nprint(is_only_string({'a': 2}))  # # ? Please handle this case!! Should return False\nprint(is_only_string(\"1 2\"))\n\n\ndef is_alphanumeric(string):\n    # the character class needs a capital Z: the range A-z would also match\n    # characters such as '[', ']', '^' and '_'\n    return is_string(string) and re.fullmatch(\"[0-9a-zA-Z]+\", string) is not None\n\n\nprint(is_alphanumeric('11'))  # True\nprint(is_alphanumeric(['hello']))  # False\nprint(is_alphanumeric('this is a long sentence'))  # False\nprint(is_alphanumeric({'a': 2}))  # False\nprint(is_alphanumeric(\"this is string....!!!\"))  # False\n\n\ndef is_array_or_tuple(string):\n    if isinstance(string, list) or isinstance(string, tuple):\n        return \"is_array_or_tuple is true\"\n    else:\n        return \"is_array_or_tuple is false\"\n\n\nprint(is_array_or_tuple('hello'))  # False\nprint(is_array_or_tuple(['hello']))  # True\nprint(is_array_or_tuple([2, {}, 10]))  # True\nprint(is_array_or_tuple({'a': 2}))  # False\nprint(is_array_or_tuple((1, 2)))  #\nprint(is_array_or_tuple(set()))\n\n\ndef are_same_type(some_input):\n    first = type(some_input[0])\n    for element in some_input[1:]:\n        if not isinstance(element, first):\n            return False\n    return True\n\n\ndef are_same_type_alt(some_input):\n    iseq = iter(some_input)\n    first = type(next(iseq))\n    return all((type(x) is first) for x in iseq)\n\n\nprint(are_same_type_alt(['hello', 'world', 'long sentence']))  # True\nprint(are_same_type_alt([1, 2, 9, 10]))  # True\nprint(are_same_type_alt([1, 2, 9, 10, \"hello\"]))  # False\nprint(are_same_type([['hello'], 'hello', ['bye']]))  # False\nprint(are_same_type([['hello'], [1, 2, 3], [{'a': 2}]]))  # True\nprint(are_same_type([['hello'], set('hello')]))  # False\n\n\ndef longest_string(s1, s2):\n    if type(s1) is not str or type(s2) is not str:\n        return False\n    # testing that inputs are strings containing lowercase a-z\n    if not re.match(\"[a-z]\", s1) and not re.match(\"[a-z]\", s2):\n        return False\n    else:\n        empty = []\n        S = s1+s2\n        for element in S:\n            if element not in empty:\n                empty.append(element)\n        empty = sorted(empty)\n        empty = ''.join(empty)\n        return empty\n\n\na = 'xyaabbbccccdefww'\nb = 'xxxxyyyyabklmopq'\nx = 'abcdefghijklmnopqrstuvwxyz'\ny = 12\n\n\nprint(longest_string(a, b))  # abcdefklmopqwxy\nprint(longest_string(a, x))  # abcdefghijklmnopqrstuvwxyz\nprint(longest_string(a, y))  # False\n\n\ndef convert(number):\n    string = str(number)\n    new_list = []\n    for element in string:\n        new_list.append(element)\n    return sorted(new_list, reverse=True)\n\n\nprint(convert(429563))  # [9, 6, 5, 4, 3, 2]\nprint(convert(324))  # [4, 3, 2]\n\n\ndef count_repetition(some_list):\n    dictionary = {}\n    for element in some_list:\n        if element not in dictionary:\n            dictionary[element] = 1\n        else:\n            dictionary[element] += 1\n    return dictionary\n\n\nprint(count_repetition(['kerouac', 'fante', 'fante', 'buk', 'hemingway', 'hornby', 'kerouac', 'buk', 'fante']))\n# {'kerouac': 2, 'fante': 3, 'buk': 2, 'hemingway': 1, 'hornby': 1}\n\n\ndef is_caught(string):\n    c = string.find(\"C\") + 1\n    m = string.find(\"m\")\n    return len(string[c:m]) < 3\n\n\nprint(is_caught('C.....m'))  # False\nprint(is_caught('C..m'))  # True\nprint(is_caught('..C..m'))  # True\nprint(is_caught('...C...m'))  # False\nprint(is_caught('C.m'))  # True\n\n\ndef split_the_bill(group):\n    value_sum = 0\n    # compute the average\n    for 
key, value in group.items():\n value_sum += value\n avg = value_sum / len(group)\n # determine the difference from what was actually paid\n for key, value in group.items():\n group[key] = avg - value\n return group\n\n\ngroup = {\n 'Amy': 20,\n 'Bill': 15,\n 'Chris': 10\n}\nprint(split_the_bill(group)) # { 'Amy': -5, 'Bill': 0, 'Chris': 5 }\n\n\ndef exp_recursive(b, n):\n if n == 0:\n return 1\n if n >= 1:\n return b * exp_recursive(b, n - 1)\n\n\nprint(exp_recursive(5, 3)) # 125\nprint(exp_recursive(2, 4)) # 16\nprint(exp_recursive(5, 1)) # 5\nprint(exp_recursive(6, 0)) # 1\n\n\ndef zero_sum(arr):\n # making sure input is a list of numbers\n if not (are_same_type(arr) and isinstance(arr[0], int)):\n return False\n list_of_pos = []\n for i in range(len(arr)):\n # start second loop at i so you don't repeat the i's you've already been through\n # also to prevent repetition\n for j in range(i, len(arr)):\n if arr[i] + arr[j] == 0:\n index = [i, j]\n list_of_pos.append(index)\n # with list comprehension\n # [[i,j] for i in range(len(arr)) for j in range(i, len(arr)) if arr[i] + arr[j] == 0]\n if len(list_of_pos) >= 1:\n return list_of_pos[:]\n\n\nprint(zero_sum([1, 5, 0, -5, 3, -1])) # [[0, 5], [1, 3], [2, 2]]\nprint(zero_sum([1, -1])) # [[0, 1]]\nprint(zero_sum([0, 4, 3, 5])) # [[0, 0]]\n\n\ndef count_upper_lower():\n sentence = str(input(\"Sentence to evaluate: \"))\n upper_count = 0\n lower_count = 0\n for i in sentence:\n if i.isupper():\n upper_count += 1\n if i.islower():\n lower_count += 1\n return \"UPPER CASE {} LOWER CASE {}\".format(upper_count, lower_count)\n\n\n# print(count_upper_lower())\n\n\ndef new_dict(dict_input):\n new = current = {}\n for name in dict_input:\n current[name] = {}\n current = current[name]\n return new\n\n\nprint(new_dict([1, 2, 3, 4, 5])) # {1: {2: {3: {4: {5: {}}}}}}\n\n\ndef banking():\n amount = 0\n while True:\n deposit = input(\"deposit: \")\n if not deposit:\n break\n else:\n deposit = int(deposit)\n amount += deposit\n while True:\n withdraw = input(\"withdraw: \")\n if not withdraw:\n break\n else:\n withdraw = int(withdraw)\n amount -= withdraw # withdrawals reduce the balance\n return amount\n\n\n# print(banking())\n\n\ndef print_dictionary():\n newer_dict = {}\n for i in range(1, 21):\n newer_dict[i] = i**2\n print(newer_dict[i])\n\n\n# print_dictionary()\n\n\ndef permute(some_list):\n # full-length permutations, returned as lists rather than tuples\n return [list(p) for p in itertools.permutations(some_list)]\n\n\nprint(permute([1, 2, 3])) # [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]\n\n\ndef perms(nums):\n result_perms = [[]]\n for n in nums:\n new_perms = []\n for perm in result_perms:\n for i in range(len(perm) + 1):\n new_perms.append(perm[:i] + [n] + perm[i:])\n result_perms = new_perms\n return result_perms\n\n\nprint(perms([1, 2, 3]))\n\n\ndef zero_nine(num):\n # word for the ones digit of num\n ones_words = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four',\n 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}\n return ones_words[num % 10]\n\n\ndef teens(num):\n teen_words = {11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen',
 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen'}\n return teen_words.get(num, '')\n\n\ndef twenties(num):\n # word for the tens part of num\n tens_words = {10: 'ten', 20: 'twenty', 30: 'thirty', 40: 'forty'}\n return tens_words.get(num - (num % 10), '')\n\n\ndef write_number(num):\n if num < 10:\n return zero_nine(num)\n if num == 10:\n return twenties(num)\n if 10 < num < 20:\n return teens(num)\n if 19 < num < 50:\n if num % 10 == 0:\n return twenties(num)\n return twenties(num) + '-' + zero_nine(num)\n\n\n#\n# print(write_number(11)) # \"eleven\"\n# print(write_number(2)) # \"two\"\n# print(write_number(32)) # \"thirty-two\"\n# print(write_number(10)) # \"ten\"\n# print(write_number(44)) # \"forty-four\"\n","sub_path":"python/week1/day1/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":8951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"307633078","text":"import os\nfrom math import sqrt, ceil\nfrom random import randint\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keras.models import Model\nfrom keras.models import load_model\nfrom augment import *\n\n\ndef visualize(model, layer_names, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (200, 100))\n\n for layer_name in layer_names:\n # Show layer\n layer_out = Model(input=model.layers[0].input, output=model.get_layer(layer_name).output)\n out = layer_out.predict(np.array([image]))\n\n print(out.shape)\n for i in range(out.shape[3]):\n result = np.empty((out.shape[1], out.shape[2], 3))\n result[:, :, 0] = out[0, :, :, i]\n result[:, :, 1] = out[0, :, :, i]\n result[:, :, 2] = out[0, :, :, i]\n result += 0.5\n cv2.imwrite(\"visualize/layer_{}_{}.png\".format(layer_name, i), result * 255)\n\n\ndef show_augmentation(image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (200, 100))\n translated_image, shift = random_shift(image)\n cv2.imwrite(\"visualize/brightness.png\", augment_lightness(image))\n cv2.imwrite(\"visualize/shadow.png\", add_random_shadow(image))\n cv2.imwrite(\"visualize/translate.png\", translated_image)\n cv2.imwrite(\"visualize/flip.png\", cv2.flip(image, 1))\n\n\ndef random_image(folder):\n files = os.listdir(folder + \"/IMG/\")\n index = randint(0, len(files) - 1) # randint is inclusive at both ends\n return cv2.imread(folder + \"/IMG/\" + files[index])\n\n\nmodel = load_model('model.h5')\n# visualize(model, ['visual_1', 'color_space'], random_image('samples/from_side'))\nshow_augmentation(random_image('samples/from_side'))","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"284401452","text":"from argparse import Namespace\nfrom gooey import GooeyParser\nfrom pathlib import Path\nfrom typing import Optional\n\nimport numpy as np\nimport tensorflow as tf # type: ignore\n\nfrom .train_config import WEIGHTS\n\n\nclass TestConfig():\n NAME = None\n\n WEIGHT = 'last'\n\n RESULT_DIR = \"results/\"\n\n\ndef test_config_parser(\n parser: GooeyParser = GooeyParser(),\n title='Test Setting',\n test_config: TestConfig = TestConfig(),\n # modifiable: bool = True,\n ) -> GooeyParser:\n\n load_parser = parser.add_mutually_exclusive_group(\n 'Load Weights')\n load_parser.add_argument(\n '--load_pretrained_weights',\n choices=WEIGHTS,\n # 
default=test_config.WEIGHT,\n )\n # load_parser.add_argument(\n # '--load_specific_weights',\n # choices=\n # )\n load_parser.add_argument(\n '--load_pretrained_file',\n widget='FileChooser'\n )\n\n log_parser = parser.add_argument_group(\n 'Log',\n \"Save result options\",\n gooey_options={'show_border': True, 'columns': 2}\n )\n log_parser.add_argument(\n \"--result-path\", type=str,\n metavar='Result File Path.',\n default=(Path(test_config.RESULT_DIR).joinpath('untitled' if test_config.NAME is None\n else str(test_config.NAME))\n ).joinpath('result.csv'),\n help='{}{}TIME{}/result.csv'.format(\n Path(test_config.RESULT_DIR).joinpath('RESULT_NAME'),\n '{', '}')\n )\n\n return parser\n","sub_path":"model/keras_applications/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"138341504","text":"import os\nimport json\n\nfrom populus.utils.filesystem import (\n get_compiled_contracts_file_path,\n recursive_find_files,\n DEFAULT_CONTRACTS_DIR\n)\nfrom solc import (\n compile_files,\n)\nfrom solc.exceptions import (\n ContractsNotFound,\n)\n\n\ndef find_project_contracts(project_dir, contracts_rel_dir=DEFAULT_CONTRACTS_DIR):\n contracts_dir = os.path.join(project_dir, contracts_rel_dir)\n\n return tuple(\n os.path.relpath(p) for p in recursive_find_files(contracts_dir, \"*.sol\")\n )\n\n\ndef write_compiled_sources(project_dir, compiled_sources):\n compiled_contract_path = get_compiled_contracts_file_path(project_dir)\n\n with open(compiled_contract_path, 'w') as outfile:\n outfile.write(\n json.dumps(compiled_sources,\n sort_keys=True,\n indent=4,\n separators=(',', ': '))\n )\n return compiled_contract_path\n\n\ndef compile_project_contracts(project_dir, contracts_dir, **compiler_kwargs):\n compiler_kwargs.setdefault('output_values', ['bin', 'bin-runtime', 'abi'])\n contract_source_paths = find_project_contracts(project_dir, contracts_dir)\n try:\n compiled_sources = compile_files(contract_source_paths, **compiler_kwargs)\n except ContractsNotFound:\n return contract_source_paths, {}\n\n return contract_source_paths, compiled_sources\n\n\ndef compile_and_write_contracts(project_dir, contracts_dir, **compiler_kwargs):\n contract_source_paths, compiled_sources = compile_project_contracts(\n project_dir,\n contracts_dir,\n **compiler_kwargs\n )\n\n output_file_path = write_compiled_sources(project_dir, compiled_sources)\n return contract_source_paths, compiled_sources, output_file_path\n","sub_path":"populus/compilation.py","file_name":"compilation.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"315022112","text":"from sqlalchemy import (\n Column,\n Integer,\n String,\n Boolean,\n PrimaryKeyConstraint,\n)\nfrom .models import Base\n\n\nclass TrackRoute(Base):\n __tablename__ = \"track_routes\"\n\n # Actual URL slug for the track, includes collision_id\n slug = Column(String, nullable=False)\n # Just the title piece of the slug for the track, excludes collision_id\n # Used for finding max collision_id needed for duplicate title_slugs\n title_slug = Column(String, nullable=False)\n collision_id = Column(Integer, nullable=False)\n owner_id = Column(Integer, nullable=False)\n track_id = Column(Integer, nullable=False)\n is_current = Column(Boolean, nullable=False)\n blockhash = Column(String, nullable=False)\n blocknumber = Column(Integer, nullable=False)\n 
txhash = Column(String, nullable=False)\n\n PrimaryKeyConstraint(owner_id, slug)\n\n def __repr__(self):\n return f\"<TrackRoute(slug={self.slug}, title_slug={self.title_slug}, collision_id={self.collision_id}, owner_id={self.owner_id}, track_id={self.track_id}, is_current={self.is_current})>\"\n","sub_path":"discovery-provider/src/models/track_route.py","file_name":"track_route.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"93962458","text":"from phase_pattern import *\nfrom copy import deepcopy\n\nfirst = Phase()\n\n\ndef on_return(menu=None, user=None, message=None):\n key = message.content\n keys = [k['key'] for k in user.basket]\n if key in keys:\n i = keys.index(key)\n # move the already-chosen product to the end of the basket\n user.basket = user.basket[:i] + user.basket[i+1:] + [user.basket[i]]\n else:\n user.basket.append(deepcopy(menu.products[key]))\n # user.basket[-1]['amount']\n # user.basket[-1]['price']\n return user, 'NEXT', None\n\ndef on_call(menu=None, user=None, message=None):\n menu.database.to_log(user, message, 'use_bonus')\n user_id = user.id\n total_price = sum(i['price'] for i in user.basket) * user.discount\n keys = menu.products.keys()\n values = [menu.products[key]['title'] for key in keys]\n\n callbacks = [key for key in keys]\n\n markup = first.get_buttons(values, callbacks, cols=2, bb=False, refb=True)\n MSG = 'Hi, I am a chat bot reporting live from the sea floor!\\nYou can use me to order seafood delivered to your door straight from the Far East!\\n(by placing an order through the chat bot, you agree to receive news about fresh seafood arrivals)\\n\\nHere is what we have:'\n menu.bot.send_message(user_id, MSG, reply_markup=markup)\n\n\ndef check_access(menu, user, message):\n return (message.type == 'callback') and (message.content in menu.products)\n\ndef on_undo(menu=None, user=None, message=None):\n user.basket = user.basket[:-1]\n return user\n\n\nsetattr(first, 'on_call', on_call)\nsetattr(first, 'check_access', check_access)\nsetattr(first, 'on_return', on_return)\nsetattr(first, 'on_undo', on_undo)\n# del on_call, on_return, check_access\n","sub_path":"branches/use_bonuses/steps/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"556324827","text":"import re\nfrom urllib import request\nfrom bs4 import BeautifulSoup\n\n#classes\nclass crawler:\n\n\t#class globals\n\t#regex for detecting malformed urls\n\turlRegex = re.compile(\n\t\tr'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n r'localhost|' #localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\d+)?' 
# optional port\n r'(?:/?|[/?]\S+)$', re.IGNORECASE)\n\n\t#class functions\n\t#validate a url\n\tdef validUrl(self, url, error):\n\t\t\n\t\t#return true if url is of valid form\n\t\tif re.search(self.urlRegex, url):\n\t\t\treturn True\n\t\t\n\t\t#return false if url is of invalid form and the error flag is set\n\t\telif error:\n\t\t\tprint('invalid url form')\n\t\t\treturn False\n\t\t\n\t\t#return false in all other circumstances\n\t\telse:\n\t\t\treturn False\n\t\t\n\t#download page using url\n\tdef getPage(self, url):\n\t\ttry:\n\t\t\tpage = request.urlopen(url)\n\t\t\treturn page\n\t\texcept request.HTTPError:\n\t\t\tprint('error; could not open page', url)\n\t\t\treturn False\n\t\n\t#parse href links from page\n\tdef parseLinks(self, page):\n\t\t# an explicit parser avoids bs4's GuessedAtParserWarning\n\t\tsoup = BeautifulSoup(page.read(), 'html.parser')\n\t\tlinksList = soup.find_all('a', href=True)\n\t\treturn linksList","sub_path":"lib/dwatCrawl.py","file_name":"dwatCrawl.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"93605878","text":"\"\"\"\n REST API Documentation for the NRS TFRS Credit Trading Application\n\n The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.\n\n OpenAPI spec version: v1\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom django.conf.urls import url\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.schemas import SchemaGenerator\nfrom rest_framework.views import APIView\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom rest_framework_swagger import renderers\n# generated views\nfrom . import views\n# custom views\nfrom . 
import views_custom\n\nclass SwaggerSchemaView(APIView):\n permission_classes = [AllowAny]\n renderer_classes = [\n renderers.OpenAPIRenderer,\n renderers.SwaggerUIRenderer\n ]\n _ignore_model_permissions = True\n exclude_from_schema = True\n def get(self, request):\n generator = SchemaGenerator()\n schema = generator.get_schema(request=request)\n return Response(schema)\n\nurlpatterns = [\n # Swagger documentation\n url(r'^$', SwaggerSchemaView.as_view()),\n url(r'^attachments/bulk$', views.attachmentsBulkPost.as_view()),\n url(r'^attachments$', views.attachmentsGet.as_view()),\n url(r'^attachments/(?P<id>[0-9]+)/delete$', views.attachmentsIdDeletePost.as_view()),\n url(r'^attachments/(?P<id>[0-9]+)/download$', views_custom.attachmentsIdDownloadGet.as_view()),\n url(r'^attachments/(?P<id>[0-9]+)$', views.attachmentsIdGet.as_view()),\n url(r'^attachments/upload$', views_custom.attachmentsUploadPost.as_view()),\n url(r'^contacts/bulk$', views.contactsBulkPost.as_view()),\n url(r'^contacts$', views.contactsGet.as_view()),\n url(r'^contacts/(?P<id>[0-9]+)/delete$', views.contactsIdDeletePost.as_view()),\n url(r'^contacts/(?P<id>[0-9]+)$', views.contactsIdGet.as_view()),\n url(r'^credittrades/bulk$', views.credittradesBulkPost.as_view()),\n url(r'^credittrades$', views.credittradesGet.as_view()),\n url(r'^credittrades/(?P<id>[0-9]+)/attachments$', views_custom.credittradesIdAttachmentsGet.as_view()),\n url(r'^credittrades/(?P<id>[0-9]+)/delete$', views.credittradesIdDeletePost.as_view()),\n url(r'^credittrades/(?P<id>[0-9]+)$', views.credittradesIdGet.as_view()),\n url(r'^credittrades/(?P<id>[0-9]+)/history$', views_custom.credittradesIdHistoryGet.as_view()),\n url(r'^credittrades/(?P<id>[0-9]+)/notes$', views_custom.credittradesIdNotesGet.as_view()),\n url(r'^credittrades/search$', views_custom.credittradesSearchGet.as_view()),\n url(r'^credittradetradelogentries/bulk$', views.credittradetradelogentriesBulkPost.as_view()),\n url(r'^credittradetradelogentries$', views.credittradetradelogentriesGet.as_view()),\n url(r'^credittradetradelogentries/(?P<id>[0-9]+)/delete$', views.credittradetradelogentriesIdDeletePost.as_view()),\n url(r'^credittradetradelogentries/(?P<id>[0-9]+)$', views.credittradetradelogentriesIdGet.as_view()),\n url(r'^users/current/favourites/(?P<id>[0-9]+)/delete$', views_custom.usersCurrentFavouritesIdDeletePost.as_view()),\n url(r'^users/current/favourites$', views_custom.usersCurrentFavouritesPut.as_view()),\n url(r'^users/current/favourites/search$', views_custom.usersCurrentFavouritesSearchGet.as_view()),\n url(r'^users/current$', views_custom.usersCurrentGet.as_view()),\n url(r'^fuelsuppliers/bulk$', views.fuelsuppliersBulkPost.as_view()),\n url(r'^fuelsuppliers$', views.fuelsuppliersGet.as_view()),\n url(r'^fuelsuppliers/(?P<id>[0-9]+)/attachments$', views_custom.fuelsuppliersIdAttachmentsGet.as_view()),\n url(r'^fuelsuppliers/(?P<id>[0-9]+)/delete$', views.fuelsuppliersIdDeletePost.as_view()),\n url(r'^fuelsuppliers/(?P<id>[0-9]+)$', views.fuelsuppliersIdGet.as_view()),\n url(r'^fuelsuppliers/(?P<id>[0-9]+)/history$', views_custom.fuelsuppliersIdHistoryGet.as_view()),\n url(r'^fuelsuppliers/(?P<id>[0-9]+)/notes$', views_custom.fuelsuppliersIdNotesGet.as_view()),\n url(r'^fuelsuppliers/search$', views_custom.fuelsuppliersSearchGet.as_view()),\n url(r'^groups/bulk$', views.groupsBulkPost.as_view()),\n url(r'^groups$', views.groupsGet.as_view()),\n url(r'^groups/(?P<id>[0-9]+)/delete$', views.groupsIdDeletePost.as_view()),\n url(r'^groups/(?P<id>[0-9]+)$', views.groupsIdGet.as_view()),\n url(r'^groups/(?P<id>[0-9]+)/users$', 
views_custom.groupsIdUsersGet.as_view()),\n url(r'^groupmemberships/bulk$', views.groupmembershipsBulkPost.as_view()),\n url(r'^groupmemberships$', views.groupmembershipsGet.as_view()),\n url(r'^groupmemberships/(?P<id>[0-9]+)/delete$', views.groupmembershipsIdDeletePost.as_view()),\n url(r'^groupmemberships/(?P<id>[0-9]+)$', views.groupmembershipsIdGet.as_view()),\n url(r'^histories/bulk$', views.historiesBulkPost.as_view()),\n url(r'^histories$', views.historiesGet.as_view()),\n url(r'^histories/(?P<id>[0-9]+)/delete$', views.historiesIdDeletePost.as_view()),\n url(r'^histories/(?P<id>[0-9]+)$', views.historiesIdGet.as_view()),\n url(r'^lookuplists/bulk$', views.lookuplistsBulkPost.as_view()),\n url(r'^lookuplists$', views.lookuplistsGet.as_view()),\n url(r'^lookuplists/(?P<id>[0-9]+)/delete$', views.lookuplistsIdDeletePost.as_view()),\n url(r'^lookuplists/(?P<id>[0-9]+)$', views.lookuplistsIdGet.as_view()),\n url(r'^notes/bulk$', views.notesBulkPost.as_view()),\n url(r'^notes$', views.notesGet.as_view()),\n url(r'^notes/(?P<id>[0-9]+)/delete$', views.notesIdDeletePost.as_view()),\n url(r'^notes/(?P<id>[0-9]+)$', views.notesIdGet.as_view()),\n url(r'^notifications/bulk$', views.notificationsBulkPost.as_view()),\n url(r'^notifications$', views.notificationsGet.as_view()),\n url(r'^notifications/(?P<id>[0-9]+)/delete$', views.notificationsIdDeletePost.as_view()),\n url(r'^notifications/(?P<id>[0-9]+)$', views.notificationsIdGet.as_view()),\n url(r'^notificationevents/bulk$', views.notificationeventsBulkPost.as_view()),\n url(r'^notificationevents$', views.notificationeventsGet.as_view()),\n url(r'^notificationevents/(?P<id>[0-9]+)/delete$', views.notificationeventsIdDeletePost.as_view()),\n url(r'^notificationevents/(?P<id>[0-9]+)$', views.notificationeventsIdGet.as_view()),\n url(r'^offers/bulk$', views.offersBulkPost.as_view()),\n url(r'^offers$', views.offersGet.as_view()),\n url(r'^offers/(?P<id>[0-9]+)/delete$', views.offersIdDeletePost.as_view()),\n url(r'^offers/(?P<id>[0-9]+)$', views.offersIdGet.as_view()),\n url(r'^permissions/bulk$', views.permissionsBulkPost.as_view()),\n url(r'^permissions$', views.permissionsGet.as_view()),\n url(r'^permissions/(?P<id>[0-9]+)/delete$', views.permissionsIdDeletePost.as_view()),\n url(r'^permissions/(?P<id>[0-9]+)$', views.permissionsIdGet.as_view()),\n url(r'^roles/bulk$', views.rolesBulkPost.as_view()),\n url(r'^roles$', views.rolesGet.as_view()),\n url(r'^roles/(?P<id>[0-9]+)/delete$', views.rolesIdDeletePost.as_view()),\n url(r'^roles/(?P<id>[0-9]+)$', views.rolesIdGet.as_view()),\n url(r'^roles/(?P<id>[0-9]+)/permissions$', views_custom.rolesIdPermissionsGet.as_view()),\n url(r'^roles/(?P<id>[0-9]+)/users$', views_custom.rolesIdUsersGet.as_view()),\n url(r'^rolepermissions/bulk$', views.rolepermissionsBulkPost.as_view()),\n url(r'^rolepermissions$', views.rolepermissionsGet.as_view()),\n url(r'^rolepermissions/(?P<id>[0-9]+)/delete$', views.rolepermissionsIdDeletePost.as_view()),\n url(r'^rolepermissions/(?P<id>[0-9]+)$', views.rolepermissionsIdGet.as_view()),\n url(r'^users/bulk$', views.usersBulkPost.as_view()),\n url(r'^users$', views.usersGet.as_view()),\n url(r'^users/(?P<id>[0-9]+)/delete$', views.usersIdDeletePost.as_view()),\n url(r'^users/(?P<id>[0-9]+)/favourites$', views_custom.usersIdFavouritesGet.as_view()),\n url(r'^users/(?P<id>[0-9]+)$', views.usersIdGet.as_view()),\n url(r'^users/(?P<id>[0-9]+)/groups$', views_custom.usersIdGroupsGet.as_view()),\n url(r'^users/(?P<id>[0-9]+)/notifications$', views_custom.usersIdNotificationsGet.as_view()),\n url(r'^users/(?P<id>[0-9]+)/permissions$', views_custom.usersIdPermissionsGet.as_view()),\n 
url(r'^users/(?P<id>[0-9]+)/roles$', views_custom.usersIdRolesGet.as_view()),\n url(r'^users/search$', views_custom.usersSearchGet.as_view()),\n url(r'^userroles/bulk$', views.userrolesBulkPost.as_view()),\n url(r'^userroles$', views.userrolesGet.as_view()),\n url(r'^userroles/(?P<id>[0-9]+)/delete$', views.userrolesIdDeletePost.as_view()),\n url(r'^userroles/(?P<id>[0-9]+)$', views.userrolesIdGet.as_view())\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","sub_path":"server/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":9251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
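The url patterns in the server/urls.py record above rely on Django named groups: a segment such as (?P<id>[0-9]+) matches one or more digits and hands the captured text to the matched view as a keyword argument. A minimal sketch of that mechanism, assuming the Django 1.x/2.x-style django.conf.urls.url used in the record; notes_id_get here is a hypothetical stand-in for the project's generated views.notesIdGet, not part of any record above:

from django.conf.urls import url
from django.http import JsonResponse


def notes_id_get(request, id):
    # 'id' is the string captured by the (?P<id>[0-9]+) group in the pattern
    return JsonResponse({'id': int(id)})


urlpatterns = [
    url(r'^notes/(?P<id>[0-9]+)$', notes_id_get),
]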
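For the get_mali_reward method in the maligan_instructor.py record, the per-sample reward is the MaliGAN importance weight r = D(x) / (1 - D(x)), averaged over rollouts, renormalized over the batch, and centered with a mean baseline. A standalone sketch of just that arithmetic, assuming a tensor of discriminator probabilities is already available; the function name and toy values below are illustrative, not part of the record:

import torch


def mali_reward_from_probs(d_probs):
    # d_probs: (rollout_num, batch_size) tensor of D(x) = P(real | x)
    r = d_probs.mean(dim=0)  # average the rollouts -> (batch_size,)
    r = r / (1 - r)          # importance weight D / (1 - D)
    r = r / r.sum()          # renormalize over the batch
    return r - r.mean()      # subtract the mean baseline

# toy usage: 3 rollouts over a batch of 4 samples
probs = torch.tensor([[0.2, 0.6, 0.9, 0.5],
                      [0.3, 0.5, 0.8, 0.4],
                      [0.1, 0.7, 0.9, 0.6]])
print(mali_reward_from_probs(probs))  # centered weights that sum to ~0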